/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
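/* A rough sketch of how the fields above combine; the authoritative
   computation is rs6000_stack_info later in this file, and the rounding
   helper named here is illustrative only:

     save_size  ~ gp_size + fp_size + altivec_size + altivec_padding_size
		  + cr_size + vrsave_size;
     total_size ~ round_up (vars_size + parm_size + save_size + fixed_size,
			    ABI stack alignment);
*/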
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;
/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	       | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
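/* This table backs the __builtin_cpu_is builtin, which compares the
   AT_PLATFORM value recorded in the TCB against one of the strings above.
   A user-level example (the called functions are illustrative):

     if (__builtin_cpu_is ("power9"))
       run_power9_kernel ();
     else
       run_generic_kernel ();
*/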
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;		/* feature name.  */
  int mask;			/* mask bit to test.  */
  unsigned int id;		/* 0 = AT_HWCAP, 1 = AT_HWCAP2.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
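/* Likewise, this table backs __builtin_cpu_supports, which tests a single
   AT_HWCAP/AT_HWCAP2 feature bit at run time.  A user-level example (the
   called functions are illustrative):

     if (__builtin_cpu_supports ("vsx"))
       use_vsx_loop ();
     else
       use_scalar_loop ();
*/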
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB,	   "arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,   "arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
static bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },		/* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },		/* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any", -1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive which types can do PRE_MODIFY instead of
   PRE_INC and PRE_DEC, so keep track of separate bits for these two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

static bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Processor costs (relative to an add) */
const struct processor_costs *rs6000_cost;
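/* COSTS_N_INSNS comes from rtl.h and scales by the cost of a single add, so
   an entry such as COSTS_N_INSNS (19) for divsi models an integer divide as
   roughly nineteen add-latencies when rtx costs are computed.  */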
/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode, rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t, reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode, reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
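/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0),
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */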
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   AIX systems.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
  rs6000_builtin_md_vectorized_function
1812 #undef TARGET_STACK_PROTECT_GUARD
1813 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1816 #undef TARGET_STACK_PROTECT_FAIL
1817 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
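
/* Worked example (editorial note, not from the original source): for a
   variable at anchor offset 0x12345678, the high part is
   (0x12345678 + 0x8000) >> 16 = 0x1234 and the low part is 0x5678, so the
   access becomes "addis tmp,anchor,0x1234" followed by a memory access at
   displacement 0x5678 from tmp; any other offset with the same high part
   can reuse tmp.  */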
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_COMPUTE_PRESSURE_CLASSES
#define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode

#undef TARGET_OFFLOAD_OPTIONS
#define TARGET_OFFLOAD_OPTIONS rs6000_offload_options

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op

#undef TARGET_OPTAB_SUPPORTED_P
#define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_COMPARE_VERSION_PRIORITY
#define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority

#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
  rs6000_generate_version_dispatcher_body

#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
  rs6000_get_function_versions_dispatcher

#undef TARGET_OPTION_FUNCTION_VERSIONS
#define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p

#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
  rs6000_hard_regno_call_part_clobbered

#undef TARGET_SLOW_UNALIGNED_ACCESS
#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */
static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int) i;
    }

  return -1;
}
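
/* Illustrative use (editorial note): rs6000_cpu_name_lookup ("power8")
   returns the index of the "power8" entry generated from rs6000-cpus.def,
   while an unrecognized string returns -1, which callers treat as an
   invalid -mcpu=/-mtune= argument.  */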
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
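
/* Worked example (editorial note): a DFmode value (8 bytes) in a GPR on a
   32-bit target uses reg_size = UNITS_PER_WORD = 4, so it needs
   (8 + 4 - 1) / 4 = 2 consecutive registers; the same value in an FPR uses
   UNITS_PER_FP_WORD = 8 and needs only (8 + 8 - 1) / 8 = 1 register.  */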
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || FLOAT128_VECTOR_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
	return 0;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	{
	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	    return 1;

	  if (TARGET_P8_VECTOR && (mode == SImode))
	    return 1;

	  if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
	    return 1;
	}

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except general register
     and it must be able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
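
/* Examples (editorial note): a TDmode value is accepted in the FP registers
   only when REGNO is even, so it can start at f14 but not at f15; likewise
   PTImode is rejected unless it starts at an even GPR, preserving the
   even/odd pairing that quad word memory operations require.  */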
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_nregs[mode][regno];
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
/* Implement TARGET_MODES_TIEABLE_P.

   PTImode cannot tie with other modes because PTImode is restricted to even
   GPR registers, and TImode can go in any GPR as well as VSX registers (PR
   ...).

   Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
   128-bit floating point on VSX systems ties with other vectors.  */

static bool
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  if (mode1 == PTImode)
    return mode2 == PTImode;
  if (mode2 == PTImode)
    return false;

  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
    return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode1))
    return SCALAR_FLOAT_MODE_P (mode2);
  if (SCALAR_FLOAT_MODE_P (mode2))
    return false;

  if (GET_MODE_CLASS (mode1) == MODE_CC)
    return GET_MODE_CLASS (mode2) == MODE_CC;
  if (GET_MODE_CLASS (mode2) == MODE_CC)
    return false;

  if (PAIRED_VECTOR_MODE (mode1))
    return PAIRED_VECTOR_MODE (mode2);
  if (PAIRED_VECTOR_MODE (mode2))
    return false;

  return true;
}
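
/* Examples (editorial note): SFmode and DFmode tie (both scalar float
   modes), and V4SImode ties with V2DFmode (both Altivec/VSX vector modes),
   but TImode does not tie with PTImode because PTImode is restricted to
   even GPR pairs.  */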
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  */

static bool
rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
  if (TARGET_32BIT
      && TARGET_POWERPC64
      && GET_MODE_SIZE (mode) > 4
      && INT_REGNO_P (regno))
    return true;

  if (TARGET_VSX
      && FP_REGNO_P (regno)
      && GET_MODE_SIZE (mode) > 8
      && !FLOAT128_2REG_P (mode))
    return true;

  return false;
}
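
/* Example (editorial note): with -m32 -mpowerpc64, a DImode value fits in
   the full 64 bits of one GPR, but the 32-bit ABI only preserves the low
   32 bits across calls, so the register is reported as partially clobbered
   and such values are not kept live across calls.  */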
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      len += fprintf (stderr, "%sreg-class = %s", comma,
		      reg_class_names[(int)rs6000_regno_regclass[r]]);
      comma = ", ";

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:	   ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:	   ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_PAIRED:	   ret = "paired";    break;
    case VECTOR_OTHER:	   ret = "other";     break;
    default:		   ret = "unknown";   break;
    }

  return ret;
}
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
{
  static char ret[8];
  char *p = ret;

  if ((mask & RELOAD_REG_VALID) != 0)
    *p++ = 'v';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_MULTIPLE) != 0)
    *p++ = 'm';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_INDEXED) != 0)
    *p++ = 'i';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
    *p++ = 'O';
  else if ((mask & RELOAD_REG_OFFSET) != 0)
    *p++ = 'o';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_AND_M16) != 0)
    *p++ = '&';
  else if (keep_spaces)
    *p++ = ' ';

  *p = '\0';

  return ret;
}
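
/* The returned string is a fixed-width set of flag characters, one per
   RELOAD_REG_* bit tested above (or a blank when KEEP_SPACES is set), so a
   class that is valid, indexable, and offsettable shows all three of those
   flags while an unusable class shows only blanks.  */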
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;
  int spaces = 0;
  bool fuse_extra_p;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
	     rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));

  if ((reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing))
    fprintf (stderr, " Reload=%c%c",
	     (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
	     (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
  else
    spaces += sizeof (" Reload=sl") - 1;

  if (reg_addr[m].scalar_in_vmx_p)
    {
      fprintf (stderr, "%*s Upper=y", spaces, "");
      spaces = 0;
    }
  else
    spaces += sizeof (" Upper=y") - 1;

  fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
		  || reg_addr[m].fused_toc);
  if (!fuse_extra_p)
    {
      for (rc = 0; rc < N_RELOAD_REG; rc++)
	if (rc != RELOAD_REG_ANY)
	  if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
	      || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
	      || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
	      || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
	    {
	      fuse_extra_p = true;
	      break;
	    }
    }

  if (fuse_extra_p)
    {
      fprintf (stderr, "%*s Fuse:", spaces, "");
      spaces = 0;

      for (rc = 0; rc < N_RELOAD_REG; rc++)
	{
	  if (rc != RELOAD_REG_ANY)
	    {
	      char load, store;

	      if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
		load = 'l';
	      else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
		load = 'L';
	      else
		load = '-';

	      if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
		store = 's';
	      else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
		store = 'S';
	      else
		store = '-';

	      if (load == '-' && store == '-')
		spaces += 5;
	      else
		{
		  fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
			   reload_reg_map[rc].name[0], load, store);
		  spaces = 0;
		}
	    }
	}

      if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
	{
	  fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" P8gpr") - 1;

      if (reg_addr[m].fused_toc)
	{
	  fprintf (stderr, "%*sToc", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" Toc") - 1;
    }
  else
    spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE)
    fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
	     spaces, "",
	     rs6000_debug_vector_unit (rs6000_vector_unit[m]),
	     rs6000_debug_vector_unit (rs6000_vector_mem[m]));

  fputs ("\n", stderr);
}
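
/* A line of this output looks roughly like (illustrative only; the exact
   flags depend on the target options):

     Mode: DF    GPR: ... FPR: ... VMX: ... Any: ... Reload=sl Upper=y

   i.e. one address-mask column per reload register class, followed by the
   optional reload, upper-register, fusion, and vector annotations printed
   above.  */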
#define DEBUG_FMT_ID "%-32s= "
#define DEBUG_FMT_D   DEBUG_FMT_ID "%d\n"
#define DEBUG_FMT_WX  DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
#define DEBUG_FMT_S   DEBUG_FMT_ID "%s\n"
/* Print various interesting information with -mdebug=reg.  */
DEBUG_FUNCTION void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  size_t m1, m2, v;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const machine_mode print_tieable_modes[] = {
    /* ... (the mode list did not survive extraction and is left elided) ...  */
  };

  /* Virtual regs we are interested in.  */
  const static struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc:          " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset:   " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame:   " },
    { ARG_POINTER_REGNUM,			"arg pointer:  " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars:   " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame):  " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wb reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "we reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wh reg_class = %s\n"
	   "wi reg_class = %s\n"
	   "wj reg_class = %s\n"
	   "wk reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wo reg_class = %s\n"
	   "wp reg_class = %s\n"
	   "wq reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "wA reg_class = %s\n"
	   "wH reg_class = %s\n"
	   "wI reg_class = %s\n"
	   "wJ reg_class = %s\n"
	   "wK reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);
  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  nl = "\n";
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      if (!first_time)
	fputs ("\n", stderr);
    }

  if (nl)
    fputs (nl, stderr);
  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  fprintf (stderr,
		   "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		   GET_MODE_NAME (m),
		   (RS6000_RECIP_AUTO_RE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		   (RS6000_RECIP_AUTO_RSQRTE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));

      fputs ("\n", stderr);
    }
  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }
  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  fprintf (stderr, DEBUG_FMT_S, "single_float",
	   (TARGET_SINGLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "double_float",
	   (TARGET_DOUBLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "soft_float",
	   (TARGET_SOFT_FLOAT ? "true" : "false"));

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (TARGET_P8_FUSION)
    {
      char options[80];

      strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
      if (TARGET_TOC_FUSION)
	strcat (options, ", toc");

      if (TARGET_P8_FUSION_SIGN)
	strcat (options, ", sign");

      fprintf (stderr, DEBUG_FMT_S, "fusion", options);
    }

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);

  fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
	   (int)TARGET_FLOAT128_ENABLE_TYPE);

  if (TARGET_VSX)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
	     (int)VECTOR_ELEMENT_SCALAR_64BIT);

  if (TARGET_DIRECT_MOVE_128)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
	     (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
}
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate-address predicates figure out the appropriate addressing to
   use.  */
static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      machine_mode m2 = (machine_mode) m;
      bool complex_p = false;
      bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
      size_t msize;

      if (COMPLEX_MODE_P (m2))
	{
	  complex_p = true;
	  m2 = GET_MODE_INNER (m2);
	}

      msize = GET_MODE_SIZE (m2);

      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      bool small_int_vsx_p = (small_int_p
				      && (rc == RELOAD_REG_FPR
					  || rc == RELOAD_REG_VMX));

	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  Small integers in VSX registers can only do
		 REG+REG addressing.  */
	      if (small_int_vsx_p)
		addr_mask |= RELOAD_REG_INDEXED;
	      else if (nregs > 1 || m == BLKmode || complex_p)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  If we allow scalars into Altivec registers,
		 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.  */
	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && msize <= 8
		  && !VECTOR_MODE_P (m2)
		  && !FLOAT128_VECTOR_P (m2)
		  && !complex_p
		  && !small_int_vsx_p)
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DFmode:
		    case E_DDmode:
		      if (TARGET_HARD_FLOAT)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  ISA 3.0 (i.e. power9) adds D-form addressing
	     for 64-bit scalars and 32-bit SFmode to altivec registers.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && msize <= 8
	      && (rc == RELOAD_REG_GPR
		  || ((msize == 8 || m2 == SFmode)
		      && (rc == RELOAD_REG_FPR
			  || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
	    addr_mask |= RELOAD_REG_OFFSET;

	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
	     instructions are enabled.  The offset for 128-bit VSX registers is
	     only 12-bits.  While GPRs can handle the full offset range, VSX
	     registers can only handle the restricted range.  */
	  else if ((addr_mask != 0) && !indexed_only_p
		   && msize == 16 && TARGET_P9_VECTOR
		   && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
		       || (m2 == TImode && TARGET_VSX)))
	    {
	      addr_mask |= RELOAD_REG_OFFSET;
	      if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
		addr_mask |= RELOAD_REG_QUAD_OFFSET;
	    }

	  /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
	     addressing on 128-bit types.  */
	  if (rc == RELOAD_REG_VMX && msize == 16
	      && (addr_mask & RELOAD_REG_VALID) != 0)
	    addr_mask |= RELOAD_REG_AND_M16;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
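
/* Worked example (editorial note): for SImode on a power9 target, the GPR
   class ends up with RELOAD_REG_VALID, RELOAD_REG_INDEXED,
   RELOAD_REG_OFFSET and the pre-increment forms, while the FPR and VMX
   classes get only RELOAD_REG_VALID and RELOAD_REG_INDEXED, because the
   small_int_vsx_p test above restricts small integers in VSX registers to
   REG+REG addressing.  */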
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }
  /* Precalculate the valid memory formats as well as the vector information;
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));

  /* The VSX hardware allows native alignment for vectors; this controls
     whether the compiler believes it can use native alignment or still uses
     128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }
  /* KF mode (IEEE 128-bit in VSX registers).  We do not have arithmetic, so
     only set the memory modes.  Include TFmode if -mabi=ieeelongdouble.  */
  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_vector_mem[KFmode] = VECTOR_VSX;
      rs6000_vector_align[KFmode] = 128;

      if (FLOAT128_IEEE_P (TFmode))
	{
	  rs6000_vector_mem[TFmode] = VECTOR_VSX;
	  rs6000_vector_align[TFmode] = 128;
	}
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;

      rs6000_vector_mem[V1TImode] = VECTOR_VSX;
      rs6000_vector_unit[V1TImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V1TImode] = 128;
    }

  /* DFmode, see if we want to use the VSX unit.  Memory is handled
     differently, so don't set rs6000_vector_mem.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_align[DFmode] = 64;
    }

  /* SFmode, see if we want to use the VSX unit.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_vector_unit[SFmode] = VECTOR_VSX;
      rs6000_vector_align[SFmode] = 32;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }

  /* TODO add paired floating point vector support.  */
  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types are set based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wc - Reserved to represent individual CR bits (used in LLVM).
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wh - FP register for direct move instructions.
	wi - FP or VSX register to hold 64-bit integers for VSX insns.
	wj - FP or VSX register to hold 64-bit integers for direct moves.
	wk - FP or VSX register to hold 64-bit doubles for direct moves.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wn - always NO_REGS.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wt - VSX register for TImode in VSX registers.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.
	wH - Altivec register if SImode is allowed in VSX registers.
	wI - VSX register if SImode is allowed in VSX registers.
	wJ - VSX register if QImode/HImode are allowed in VSX registers.
	wK - Altivec register if QImode/HImode are allowed in VSX registers.  */
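
  /* Example (editorial note): these letters can be used directly in inline
     asm.  With -mvsx, the "wa" constraint below accepts any VSX register
     and the "%x" operand modifier prints the full VSX register number:

	__asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (res) : "wa" (a), "wa" (b));

     where res, a and b are vector double values.  */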
  if (TARGET_HARD_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;	/* SFmode */

  if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;	/* DFmode */

  if (TARGET_VSX)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;	/* V2DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;	/* V4SFmode */
      rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;	/* DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;	/* DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;	/* DImode */
      rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;	/* TImode */
    }

  /* Add conditional constraints based on various options, to allow us to
     collapse multiple insn patterns.  */
  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  if (TARGET_MFPGPR)						/* DFmode */
    rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;

  if (TARGET_LFIWAX)
    rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;	/* DImode */

  if (TARGET_DIRECT_MOVE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wj]			/* DImode */
	= rs6000_constraints[RS6000_CONSTRAINT_wi];
      rs6000_constraints[RS6000_CONSTRAINT_wk]			/* DFmode */
	= rs6000_constraints[RS6000_CONSTRAINT_ws];
      rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
    }

  if (TARGET_POWERPC64)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
    }

  if (TARGET_P8_VECTOR)						/* SFmode */
    {
      rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
    }
  else if (TARGET_VSX)
    rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;

  if (TARGET_STFIWX)
    rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;	/* DImode */

  if (TARGET_LFIWZX)
    rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;	/* DImode */

  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS;	/* KFmode */
      if (FLOAT128_IEEE_P (TFmode))
	rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS;	/* TFmode */
    }

  if (TARGET_P9_VECTOR)
    {
      /* Support for new D-form instructions.  */
      rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;

      /* Support for ISA 3.0 (power9) vectors.  */
      rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
    }

  /* Support for new direct moves (ISA 3.0 + 64bit).  */
  if (TARGET_DIRECT_MOVE_128)
    rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;

  /* Support small integers in VSX registers.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
      if (TARGET_P9_VECTOR)
	{
	  rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
	  rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
	}
    }
  /* Set up the reload helper and direct move functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_di_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_di_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_di_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_di_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_di_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_di_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_di_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_di_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_di_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_di_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_di_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_di_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_di_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_di_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_di_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_di_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_di_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_di_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_di_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_di_load;
	    }

	  if (FLOAT128_VECTOR_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_di_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_di_load;
	    }

	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_di_load;
	    }

	  if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
	    {
	      reg_addr[TImode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxti;
	      reg_addr[V1TImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv1ti;
	      reg_addr[V2DFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2df;
	      reg_addr[V2DImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2di;
	      reg_addr[V4SFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4sf;
	      reg_addr[V4SImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4si;
	      reg_addr[V8HImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv8hi;
	      reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
	      reg_addr[SFmode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxsf;

	      reg_addr[TImode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprti;
	      reg_addr[V1TImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv1ti;
	      reg_addr[V2DFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2df;
	      reg_addr[V2DImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2di;
	      reg_addr[V4SFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4sf;
	      reg_addr[V4SImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4si;
	      reg_addr[V8HImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv8hi;
	      reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
	      reg_addr[SFmode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprsf;

	      if (FLOAT128_VECTOR_P (KFmode))
		{
		  reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
		  reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
		}

	      if (FLOAT128_VECTOR_P (TFmode))
		{
		  reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
		  reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
		}
	    }
	}
      else
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_si_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_si_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_si_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_si_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_si_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_si_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_si_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_si_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_si_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_si_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_si_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_si_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_si_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_si_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_si_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_si_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_si_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_si_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_si_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_si_load;
	    }

	  if (FLOAT128_IEEE_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_si_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_si_load;
	    }
	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_si_load;
	    }

	  if (TARGET_DIRECT_MOVE)
	    {
	      reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
	      reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
	      reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
	    }
	}

      reg_addr[DFmode].scalar_in_vmx_p = true;
      reg_addr[DImode].scalar_in_vmx_p = true;

      if (TARGET_P8_VECTOR)
	{
	  reg_addr[SFmode].scalar_in_vmx_p = true;
	  reg_addr[SImode].scalar_in_vmx_p = true;

	  if (TARGET_P9_VECTOR)
	    {
	      reg_addr[HImode].scalar_in_vmx_p = true;
	      reg_addr[QImode].scalar_in_vmx_p = true;
	    }
	}
    }
  /* Setup the fusion operations.  */
  if (TARGET_P8_FUSION)
    {
      reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
      reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
      reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
      if (TARGET_64BIT)
	reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
    }

  if (TARGET_P9_FUSION)
    {
      struct fuse_insns {
	enum machine_mode mode;			/* mode of the fused type.  */
	enum machine_mode pmode;		/* pointer mode.  */
	enum rs6000_reload_reg_type rtype;	/* register type.  */
	enum insn_code load;			/* load insn.  */
	enum insn_code store;			/* store insn.  */
      };

      static const struct fuse_insns addis_insns[] = {
	{ E_SFmode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_sf_load,
	  CODE_FOR_fusion_vsx_di_sf_store },

	{ E_SFmode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_sf_load,
	  CODE_FOR_fusion_vsx_si_sf_store },

	{ E_DFmode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_df_load,
	  CODE_FOR_fusion_vsx_di_df_store },

	{ E_DFmode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_df_load,
	  CODE_FOR_fusion_vsx_si_df_store },

	{ E_DImode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_di_load,
	  CODE_FOR_fusion_vsx_di_di_store },

	{ E_DImode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_di_load,
	  CODE_FOR_fusion_vsx_si_di_store },

	{ E_QImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_qi_load,
	  CODE_FOR_fusion_gpr_di_qi_store },

	{ E_QImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_qi_load,
	  CODE_FOR_fusion_gpr_si_qi_store },

	{ E_HImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_hi_load,
	  CODE_FOR_fusion_gpr_di_hi_store },

	{ E_HImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_hi_load,
	  CODE_FOR_fusion_gpr_si_hi_store },

	{ E_SImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_si_load,
	  CODE_FOR_fusion_gpr_di_si_store },

	{ E_SImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_si_load,
	  CODE_FOR_fusion_gpr_si_si_store },

	{ E_SFmode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_sf_load,
	  CODE_FOR_fusion_gpr_di_sf_store },

	{ E_SFmode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_sf_load,
	  CODE_FOR_fusion_gpr_si_sf_store },

	{ E_DImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_di_load,
	  CODE_FOR_fusion_gpr_di_di_store },

	{ E_DFmode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_df_load,
	  CODE_FOR_fusion_gpr_di_df_store },
      };

      machine_mode cur_pmode = Pmode;
      size_t i;

      for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
	{
	  machine_mode xmode = addis_insns[i].mode;
	  enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;

	  if (addis_insns[i].pmode != cur_pmode)
	    continue;

	  if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
	    continue;

	  reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
	  reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;

	  if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
	    {
	      reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
		= addis_insns[i].load;
	      reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
		= addis_insns[i].store;
	    }
	}
    }

  /* Note which types we support fusing TOC setup plus memory insn.  We only do
     fused TOCs for medium/large code models.  */
  if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL))
    {
      reg_addr[QImode].fused_toc = true;
      reg_addr[HImode].fused_toc = true;
      reg_addr[SImode].fused_toc = true;
      reg_addr[DImode].fused_toc = true;
      if (TARGET_HARD_FLOAT)
	{
	  if (TARGET_SINGLE_FLOAT)
	    reg_addr[SFmode].fused_toc = true;
	  if (TARGET_DOUBLE_FLOAT)
	    reg_addr[DFmode].fused_toc = true;
	}
    }
3644 /* Precalculate HARD_REGNO_NREGS. */
3645 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3646 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3647 rs6000_hard_regno_nregs
[m
][r
]
3648 = rs6000_hard_regno_nregs_internal (r
, (machine_mode
)m
);
3650 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3651 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3652 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3653 if (rs6000_hard_regno_mode_ok_uncached (r
, (machine_mode
)m
))
3654 rs6000_hard_regno_mode_ok_p
[m
][r
] = true;
3656 /* Precalculate CLASS_MAX_NREGS sizes. */
3657 for (c
= 0; c
< LIM_REG_CLASSES
; ++c
)
3661 if (TARGET_VSX
&& VSX_REG_CLASS_P (c
))
3662 reg_size
= UNITS_PER_VSX_WORD
;
3664 else if (c
== ALTIVEC_REGS
)
3665 reg_size
= UNITS_PER_ALTIVEC_WORD
;
3667 else if (c
== FLOAT_REGS
)
3668 reg_size
= UNITS_PER_FP_WORD
;
3671 reg_size
= UNITS_PER_WORD
;
3673 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3675 machine_mode m2
= (machine_mode
)m
;
3676 int reg_size2
= reg_size
;
3678 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3680 if (TARGET_VSX
&& VSX_REG_CLASS_P (c
) && FLOAT128_2REG_P (m
))
3681 reg_size2
= UNITS_PER_FP_WORD
;
3683 rs6000_class_max_nregs
[m
][c
]
3684 = (GET_MODE_SIZE (m2
) + reg_size2
- 1) / reg_size2
;
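
  /* Worked example of the formula above (illustration only): a 16-byte mode
     such as V4SImode placed in FLOAT_REGS, where reg_size2 is
     UNITS_PER_FP_WORD (8 bytes), needs (16 + 8 - 1) / 8 = 2 registers, while
     the same mode in a VSX class with 16-byte registers needs just one.  */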
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));

  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
		 "-ffast-math");
      if (flag_trapping_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip",
		 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
		 "-ffast-math");

      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
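
  /* Worked example (illustrative, not part of the option handling): with
     -mrecip and -ffast-math on a cpu that provides fres/frsqrte, the HAVE_RE
     and HAVE_RSQRTE bits set above are combined with the AUTO_RE/AUTO_RSQRTE
     bits, so a division a / b can later be expanded as a multiplied by a
     reciprocal estimate of b refined by Newton-Raphson steps, instead of a
     hardware divide.  */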
  /* Update the addr mask bits in reg_addr to help secondary reload and
     go_if_legitimate_address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */

static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif

/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like PAIRED are no longer in target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC    : 0)
	  | ((TARGET_CMPB)		    ? RS6000_BTM_CMPB	    : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	    : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	    : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	    : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	    : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE    : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES   : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD    : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	    : 0)
	  | ((TARGET_P8_VECTOR)		    ? RS6000_BTM_P8_VECTOR  : 0)
	  | ((TARGET_P9_VECTOR)		    ? RS6000_BTM_P9_VECTOR  : 0)
	  | ((TARGET_P9_MISC)		    ? RS6000_BTM_P9_MISC    : 0)
	  | ((TARGET_MODULO)		    ? RS6000_BTM_MODULO	    : 0)
	  | ((TARGET_64BIT)		    ? RS6000_BTM_64BIT	    : 0)
	  | ((TARGET_CRYPTO)		    ? RS6000_BTM_CRYPTO	    : 0)
	  | ((TARGET_HTM)		    ? RS6000_BTM_HTM	    : 0)
	  | ((TARGET_DFP)		    ? RS6000_BTM_DFP	    : 0)
	  | ((TARGET_HARD_FLOAT)	    ? RS6000_BTM_HARD_FLOAT : 0)
	  | ((TARGET_LONG_DOUBLE_128)	    ? RS6000_BTM_LDBL128    : 0)
	  | ((TARGET_FLOAT128_TYPE)	    ? RS6000_BTM_FLOAT128   : 0)
	  | ((TARGET_FLOAT128_HW)	    ? RS6000_BTM_FLOAT128_HW : 0));
}
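
/* Usage sketch (hypothetical caller, for illustration only): code that
   expands target builtins can test the computed mask, e.g.

     HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
     if ((mask & RS6000_BTM_ALTIVEC) != 0)
       ;  // AltiVec builtins are available

   The real consumer stores the result in rs6000_builtin_mask (see
   rs6000_option_override_internal below).  */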
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		      vec<const char *> &/*constraints*/,
		      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
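
/* Example of the kind of asm statement this hook protects (illustration
   only, not present elsewhere in this file):

     long x, y;
     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   The addic instruction writes XER[CA] even though the asm declares no
   clobber; adding CA_REGNO to the clobber list above keeps such pre-GCC-5
   code working.  */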
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index = -1, tune_index = -1;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  /* Remember the explicit arguments.  */
  if (global_init_p)
    rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries",
	     "-malign-power");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization,
     so it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = SImode;
      rs6000_pointer_size = 32;
    }

  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
		   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;

  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    cpu_index = rs6000_cpu_index;
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    cpu_index = main_target_opt->x_rs6000_cpu_index;
  else if (OPTION_TARGET_CPU_DEFAULT)
    cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
  if (cpu_index >= 0)
    {
      const char *unavailable_cpu = NULL;
      switch (processor_target_table[cpu_index].processor)
	{
#ifndef HAVE_AS_POWER9
	case PROCESSOR_POWER9:
	  unavailable_cpu = "power9";
	  break;
#endif
#ifndef HAVE_AS_POWER8
	case PROCESSOR_POWER8:
	  unavailable_cpu = "power8";
	  break;
#endif
#ifndef HAVE_AS_POPCNTD
	case PROCESSOR_POWER7:
	  unavailable_cpu = "power7";
	  break;
#endif
#ifndef HAVE_AS_DFP
	case PROCESSOR_POWER6:
	  unavailable_cpu = "power6";
	  break;
#endif
#ifndef HAVE_AS_POPCNTB
	case PROCESSOR_POWER5:
	  unavailable_cpu = "power5";
	  break;
#endif
	default:
	  break;
	}

      if (unavailable_cpu)
	{
	  cpu_index = -1;
	  warning (0, "will not generate %qs instructions because "
		   "assembler lacks %qs support", unavailable_cpu,
		   unavailable_cpu);
	}
    }

  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index;
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
	 POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
	 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
	 switched to using rs6000_isa_flags, we need to do the initialization
	 here.

	 If there is a TARGET_DEFAULT, use that.  Otherwise fall back to using
	 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults.  */
      HOST_WIDE_INT flags;

      if (TARGET_DEFAULT)
	flags = TARGET_DEFAULT;
      else
	{
	  /* PowerPC 64-bit LE requires at least ISA 2.07.  */
	  const char *default_cpu = (!TARGET_POWERPC64
				     ? "powerpc"
				     : (BYTES_BIG_ENDIAN
					? "powerpc64"
					: "powerpc64le"));
	  int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
	  flags = processor_target_table[default_cpu_index].target_enable;
	}
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (cpu_index >= 0)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
    }
  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mmultiple");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mstring");
	}
    }

  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }

  if (!rs6000_fold_gimple)
    fprintf (stderr,
	     "gimple folding of rs6000 builtins has been disabled.\n");

  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;

      if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);

  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);

  else if (TARGET_P9_MINMAX)
    {
      if (cpu_index >= 0)
	{
	  if (cpu_index == PROCESSOR_POWER9)
	    {
	      /* legacy behavior: allow -mcpu=power9 with certain
		 capabilities explicitly disabled.  */
	      rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
	    }
	  else
	    error ("power9 target option is incompatible with %<%s=<xxx>%> "
		   "for <xxx> less than power9", "-mcpu");
	}
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
	       != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
		   & rs6000_isa_flags_explicit))
	{
	  /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
	     were explicitly cleared.  */
	  error ("%qs incompatible with explicitly disabled options",
		 "-mpower9-minmax");
	}
      else
	rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }
  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);

  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
	error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
	     not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
	error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
	warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
			    | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory is not available in little endian "
		       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;

  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);

  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  if (TARGET_P8_FUSION_SIGN)
	    error ("%qs requires %qs", "-mpower8-fusion-sign",
		   "-mpower8-fusion");

	  if (TARGET_TOC_FUSION)
	    error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

	  rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  /* We prefer to not mention undocumented options in
	     error messages.  However, if users have managed to select
	     power9-fusion without selecting power8-fusion, they
	     already know about undocumented flags.  */
	  error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
	  rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P9_FUSION);

  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
     model.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
	 error messages.  However, if users have managed to select
	 power9-vector without selecting power8-vector, they
	 already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
	  (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
	error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P9_VECTOR is explicit and
	     OPTION_MASK_P8_VECTOR is not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
	  rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
    }

  /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
	  && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
	error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}
      else if (!TARGET_ALLOW_MOVMISALIGN)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-munefficient-unaligned-vsx",
		   "-mallow-movmisalign");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}
    }
  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

  /* Set -mabi=ieeelongdouble on some old targets.  Note, AIX and Darwin
     explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
     pick up this default.  */
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems.  In GCC 7, we would enable the IEEE 128-bit floating point
     infrastructure (-mfloat128-type) but not enable the actual __float128 type
     unless the user used the explicit -mfloat128.  In GCC 8, we enable both
     the keyword as well as the type.  */
  TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;

  /* IEEE 128-bit floating point requires VSX support.  */
  if (TARGET_FLOAT128_KEYWORD)
    {
      if (!TARGET_VSX)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    error ("%qs requires VSX support", "-mfloat128");

	  TARGET_FLOAT128_TYPE = 0;
	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}
      else if (!TARGET_FLOAT128_TYPE)
	{
	  TARGET_FLOAT128_TYPE = 1;
	  warning (0, "The -mfloat128 option may not be fully supported");
	}
    }

  /* Enable the __float128 keyword under Linux by default.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
  /* If we are supporting the float128 type and full ISA 3.0 support,
     enable -mfloat128-hardware by default.  However, don't enable the
     __float128 keyword if it was explicitly turned off.  64-bit mode is needed
     because sometimes the compiler wants to put things in an integer
     container, and if we don't have __int128 support, it is impossible.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }
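
  /* Illustrative user-level consequence (not part of the option logic): once
     OPTION_MASK_FLOAT128_KEYWORD and OPTION_MASK_FLOAT128_HW are enabled
     above, a declaration such as

	 __float128 q = 1.0q;

     compiles, and on full ISA 3.0 targets the arithmetic can use the
     quad-precision hardware instructions (xsaddqp and friends) rather than
     software emulation.  */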
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);

  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      targetm.secondary_memory_needed
	= rs6000_debug_secondary_memory_needed;
      targetm.can_change_mode_class
	= rs6000_debug_can_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	error ("unknown vectorization library ABI type (%qs) for "
	       "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    {
      TARGET_FLOAT128_TYPE = 0;
      rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
			     | OPTION_MASK_FLOAT128_KEYWORD)
			    & ~rs6000_isa_flags_explicit);
    }

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (!global_options_set.x_rs6000_altivec_abi
      && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL &&
	  !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);

  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = 0;
      rs6000_double_float = 0;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      if (cpu_index >= 0 && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
	rs6000_isa_flags &= ~OPTION_MASK_ISEL;
      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_POWER7
			&& rs6000_cpu != PROCESSOR_POWER8
			&& rs6000_cpu != PROCESSOR_POWER9
			&& rs6000_cpu != PROCESSOR_PPCA2
			&& rs6000_cpu != PROCESSOR_CELL
			&& rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5
			 || rs6000_cpu == PROCESSOR_POWER7
			 || rs6000_cpu == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6
				 || rs6000_cpu == PROCESSOR_POWER7
				 || rs6000_cpu == PROCESSOR_POWER8
				 || rs6000_cpu == PROCESSOR_POWER9
				 || rs6000_cpu == PROCESSOR_PPCE500MC
				 || rs6000_cpu == PROCESSOR_PPCE500MC64
				 || rs6000_cpu == PROCESSOR_PPCE5500
				 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
  /* Handle stack protector.  */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif

  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
	error ("%qs is not a valid number in %qs", str,
	       "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
	  || (TARGET_64BIT && (offset & 3)))
	error ("%qs is not a valid offset in %qs", str,
	       "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
	error ("%qs is not a valid base register in %qs", str,
	       "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
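
  /* Hedged usage sketch: a typical TLS-based guard configuration on a 64-bit
     Linux target would look like

	 -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
	 -mstack-protector-guard-offset=0x28

     where r13 is the thread pointer and the offset must fit the signed
     16-bit, word-aligned range checked above.  The particular offset value
     here is only an assumed example.  */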
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	 aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
	  || rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    {
	      can_override_loop_align = 1;
	      align_loops = 16;
	    }
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }

  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;
      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;
      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;
      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;
      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;
      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;
      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;
      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;
      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;
      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;
      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;
      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;
      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;
      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;
      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;
      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;
      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;
      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;
      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;
      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;
      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;
      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;
      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;
      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;
      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;
      case PROCESSOR_POWER8:
	rs6000_cost = &power8_cost;
	break;
      case PROCESSOR_POWER9:
	rs6000_cost = &power9_cost;
	break;
      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;
      default:
	gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 rs6000_cost->simultaneous_prefetches,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 rs6000_cost->cache_line_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Use the 'model' -fsched-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
			 SCHED_PRESSURE_MODEL,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for %<%s=%s%>", "-mrecip", q);
		  invert = false;
		  mask = 0;
		  ret = false;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like PAIRED are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
				  rs6000_builtin_mask);

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}

/* Implement targetm.vectorize.builtin_mask_for_load.  */

static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */

static int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return
     the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
	  || rs6000_cpu == PROCESSOR_POWER5
	  || rs6000_cpu == PROCESSOR_POWER6
	  || rs6000_cpu == PROCESSOR_POWER7
	  || rs6000_cpu == PROCESSOR_POWER8
	  || rs6000_cpu == PROCESSOR_POWER9))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */

static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
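
/* For illustration: when rs6000_loop_align returns 5 (a 2**5 = 32 byte
   boundary), the value computed here is (1 << 5) - 1 = 31, i.e. at most 31
   padding bytes may be inserted to reach that alignment.  */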
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
    return true;

  if (rs6000_alignment_flags == MASK_ALIGN_POWER)
    return true;

  /* Assuming that all other types are naturally aligned.  CHECKME!  */
  return true;
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return true;

      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}

/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
      case scalar_stmt:
      case scalar_load:
      case scalar_store:
      case vector_stmt:
      case vector_load:
      case vector_store:
      case vec_to_scalar:
      case scalar_to_vec:
      case cond_branch_not_taken:
        return 1;

      case vec_perm:
        if (TARGET_VSX)
          return 3;
        else
          return 1;

      case vec_promote_demote:
        if (TARGET_VSX)
          return 4;
        else
          return 1;

      case cond_branch_taken:
        return 3;

      case unaligned_load:
      case vector_gather_load:
        if (TARGET_EFFICIENT_UNALIGNED_VSX)
          return 1;

        if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
          {
            elements = TYPE_VECTOR_SUBPARTS (vectype);
            if (elements == 2)
              /* Double word aligned.  */
              return 2;

            if (elements == 4)
              {
                switch (misalign)
                  {
                    case 8:
                      /* Double word aligned.  */
                      return 2;

                    case -1:
                      /* Unknown misalignment.  */
                    case 4:
                    case 12:
                      /* Word aligned.  */
                      return 22;

                    default:
                      gcc_unreachable ();
                  }
              }
          }

        if (TARGET_ALTIVEC)
          /* Misaligned loads are not supported.  */
          gcc_unreachable ();

        return 2;

      case unaligned_store:
      case vector_scatter_store:
        if (TARGET_EFFICIENT_UNALIGNED_VSX)
          return 1;

        if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
          {
            elements = TYPE_VECTOR_SUBPARTS (vectype);
            if (elements == 2)
              /* Double word aligned.  */
              return 2;

            if (elements == 4)
              {
                switch (misalign)
                  {
                    case 8:
                      /* Double word aligned.  */
                      return 2;

                    case -1:
                      /* Unknown misalignment.  */
                    case 4:
                    case 12:
                      /* Word aligned.  */
                      return 23;

                    default:
                      gcc_unreachable ();
                  }
              }
          }

        if (TARGET_ALTIVEC)
          /* Misaligned stores are not supported.  */
          gcc_unreachable ();

        return 2;

      case vec_construct:
        /* This is a rough approximation assuming non-constant elements
           constructed into a vector via element insertion.  FIXME:
           vec_construct is not granular enough for uniformly good
           decisions.  If the initialization is a splat, this is
           cheaper than we estimate.  Improve this someday.  */
        elem_type = TREE_TYPE (vectype);
        /* 32-bit vectors loaded into registers are stored as double
           precision, so we need 2 permutes, 2 converts, and 1 merge
           to construct a vector of short floats from them.  */
        if (SCALAR_FLOAT_TYPE_P (elem_type)
            && TYPE_PRECISION (elem_type) == 32)
          return 5;
        /* On POWER9, integer vector types are built up in GPRs and then
           use a direct move (2 cycles).  For POWER8 this is even worse,
           as we need two direct moves and a merge, and the direct moves
           take 5 cycles.  */
        else if (INTEGRAL_TYPE_P (elem_type))
          {
            if (TARGET_P9_VECTOR)
              return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
            else
              return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
          }
        else
          /* V2DFmode doesn't need a direct move.  */
          return 2;

      default:
        gcc_unreachable ();
    }
}
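
/* Worked example of the unaligned_load costs above: with VSX and
   TARGET_ALLOW_MOVMISALIGN but no efficient unaligned support, a V4SImode
   load with misalign == 8 is double word aligned and costs 2, an unknown
   misalignment (-1) costs 22, and on a machine with efficient unaligned
   VSX the same load costs only 1.  */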

/* Implement targetm.vectorize.preferred_simd_mode.  */
static machine_mode
rs6000_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_VSX)
    switch (mode)
      {
      case E_DFmode:
        return V2DFmode;
      default:;
      }
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case E_SFmode:
        return V4SFmode;
      case E_TImode:
        return V1TImode;
      case E_DImode:
        return V2DImode;
      case E_SImode:
        return V4SImode;
      case E_HImode:
        return V8HImode;
      case E_QImode:
        return V16QImode;
      default:;
      }
  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;
  return word_mode;
}

typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;

/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
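
/* Worked example: vec_cost == 90 and not_vec_cost == 5 give a density of
   (90 * 100) / 95 == 94% and a loop size of 95, so both thresholds (85%
   and 70) are exceeded and the body cost is scaled by 110/100, from 90 up
   to 99.  */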

/* Implement targetm.vectorize.init_cost.  */

/* For each vectorized loop, this var holds TRUE iff a non-memory vector
   instruction is needed by the vectorization.  */
static bool rs6000_vect_nonmem;

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  rs6000_vect_nonmem = false;
  return data;
}

/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;

      /* Check whether we're doing something other than just a copy loop.
         Not all such loops may be profitably vectorized; see
         rs6000_finish_cost.  */
      if ((kind == vec_to_scalar || kind == vec_perm
           || kind == vec_promote_demote || kind == vec_construct
           || kind == scalar_to_vec)
          || (where == vect_body && kind == vector_stmt))
        rs6000_vect_nonmem = true;
    }

  return retval;
}
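
/* Worked example: a vector_stmt of cost 1 in an inner loop relative to
   the loop being vectorized is recorded as 50 (the count of 1 scaled by
   the FIXME factor above), and being a vect_body vector_stmt it also sets
   rs6000_vect_nonmem.  */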

/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  /* Don't vectorize minimum-vectorization-factor, simple copy loops
     that require versioning for any reason.  The vectorization is at
     best a wash inside the loop, and the versioning checks make
     profitability highly unlikely and potentially quite harmful.  */
  if (cost_data->loop_info)
    {
      loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
      if (!rs6000_vect_nonmem
          && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
          && LOOP_REQUIRES_VERSIONING (vec_info))
        cost_data->cost[vect_body] += 10000;
    }

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}

/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}

/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
                                   tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    CASE_CFN_ATAN2:
    CASE_CFN_HYPOT:
    CASE_CFN_POW:
      n_args = 2;
      gcc_fallthrough ();

    CASE_CFN_ACOS:
    CASE_CFN_ACOSH:
    CASE_CFN_ASIN:
    CASE_CFN_ASINH:
    CASE_CFN_ATAN:
    CASE_CFN_ATANH:
    CASE_CFN_CBRT:
    CASE_CFN_COS:
    CASE_CFN_COSH:
    CASE_CFN_ERF:
    CASE_CFN_ERFC:
    CASE_CFN_EXP2:
    CASE_CFN_EXP:
    CASE_CFN_EXPM1:
    CASE_CFN_LGAMMA:
    CASE_CFN_LOG10:
    CASE_CFN_LOG1P:
    CASE_CFN_LOG2:
    CASE_CFN_LOG:
    CASE_CFN_SIN:
    CASE_CFN_SINH:
    CASE_CFN_SQRT:
    CASE_CFN_TAN:
    CASE_CFN_TANH:
      if (el_mode == DFmode && n == 2)
        {
          bdecl = mathfn_built_in (double_type_node, fn);
          suffix = "d2";                        /* pow -> powd2 */
        }
      else if (el_mode == SFmode && n == 4)
        {
          bdecl = mathfn_built_in (float_type_node, fn);
          suffix = "4";                         /* powf -> powf4 */
        }
      else
        return NULL_TREE;
      if (!bdecl)
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  if (!bname)
    return NULL_TREE;

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}

/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
                                    tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
             combined_fn_name (combined_fn (fn)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    CASE_CFN_COPYSIGN:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
      break;
    CASE_CFN_CEIL:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
      break;
    CASE_CFN_FLOOR:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
      break;
    CASE_CFN_FMA:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
      break;
    CASE_CFN_TRUNC:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
      break;
    CASE_CFN_NEARBYINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && flag_unsafe_math_optimizations
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && flag_unsafe_math_optimizations
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
      break;
    CASE_CFN_RINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && !flag_trapping_math
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && !flag_trapping_math
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
      break;
    default:
      break;
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);

  return NULL_TREE;
}

/* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION.  */

static tree
rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
                                       tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
             IDENTIFIER_POINTER (DECL_NAME (fndecl)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  enum rs6000_builtins fn
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  switch (fn)
    {
    case RS6000_BUILTIN_RSQRTF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
      break;
    case RS6000_BUILTIN_RSQRT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
      break;
    case RS6000_BUILTIN_RECIPF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
      break;
    case RS6000_BUILTIN_RECIP:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
      break;
    default:
      break;
    }
  return NULL_TREE;
}

/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
        {
          fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
          start = "";
        }

      if (global_options_set.x_rs6000_cpu_index)
        {
          fprintf (file, "%s -mcpu=%s", start,
                   processor_target_table[rs6000_cpu_index].name);
          start = "";
        }

      if (global_options_set.x_rs6000_tune_index)
        {
          fprintf (file, "%s -mtune=%s", start,
                   processor_target_table[rs6000_tune_index].name);
          start = "";
        }

      if (PPC405_ERRATUM77)
        {
          fprintf (file, "%s PPC405CR_ERRATUM77", start);
          start = "";
        }

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
        {
        case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
        case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
        case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
        case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
        }

      if (rs6000_sdata && g_switch_value)
        {
          fprintf (file, "%s -G %d", start,
                   g_switch_value);
          start = "";
        }
#endif

      if (*start == '\0')
        putc ('\n', file);
    }

#ifdef USING_ELFOS_H
  if (!(rs6000_default_cpu && rs6000_default_cpu[0])
      && !global_options_set.x_rs6000_cpu_index)
    {
      fputs ("\t.machine ", asm_out_file);
      if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
        fputs ("power9\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
        fputs ("power8\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
        fputs ("power7\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
        fputs ("power6\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
        fputs ("power5\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
        fputs ("power4\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
        fputs ("ppc64\n", asm_out_file);
      else
        fputs ("ppc\n", asm_out_file);
    }
#endif

  if (DEFAULT_ABI == ABI_ELFv2)
    fprintf (file, "\t.abiversion 2\n");
}

/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
          && info->first_fp_reg_save == 64
          && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
          && ! info->lr_save_p
          && ! info->cr_save_p
          && info->vrsave_size == 0
          && ! info->push_p)
        return 1;
    }

  return 0;
}

/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
           && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
        return 2;

      high >>= 1;

      if (low == 0)
        return num_insns_constant_wide (high) + 1;
      else if (high == 0)
        return num_insns_constant_wide (low) + 1;
      else
        return (num_insns_constant_wide (high)
                + num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
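
/* Worked examples: 5 and 0x12340000 each take a single instruction (addi
   and addis respectively).  On a 64-bit target, tracing the recursion for
   0x1234567890abcdef gives two instructions for each 32-bit half plus one
   to combine them, so the function returns 5.  */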
int
num_insns_constant (rtx op, machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
          && rs6000_is_valid_and_mask (op, mode))
        return 2;
      else
        return num_insns_constant_wide (INTVAL (op));

    case CONST_WIDE_INT:
      {
        int i;
        int ins = CONST_WIDE_INT_NUNITS (op) - 1;
        for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
          ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
        return ins;
      }

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
        {
          long l;

          if (DECIMAL_FLOAT_MODE_P (mode))
            REAL_VALUE_TO_TARGET_DECIMAL32
              (*CONST_DOUBLE_REAL_VALUE (op), l);
          else
            REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
          return num_insns_constant_wide ((HOST_WIDE_INT) l);
        }
      else
        {
          long l[2];

          if (DECIMAL_FLOAT_MODE_P (mode))
            REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
          else
            REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
          high = l[WORDS_BIG_ENDIAN == 0];
          low = l[WORDS_BIG_ENDIAN != 0];

          if (TARGET_32BIT)
            return (num_insns_constant_wide (low)
                    + num_insns_constant_wide (high));
          else
            {
              if ((high == 0 && low >= 0)
                  || (high == -1 && low < 0))
                return num_insns_constant_wide (low);

              else if (rs6000_is_valid_and_mask (op, mode))
                return 2;

              else if (low == 0)
                return num_insns_constant_wide (high) + 1;

              else
                return (num_insns_constant_wide (high)
                        + num_insns_constant_wide (low) + 1);
            }
        }

    default:
      gcc_unreachable ();
    }
}

/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
              && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}

/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  splat_val = val;
  msb_val = val >= 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((HOST_WIDE_INT)
          ((unsigned HOST_WIDE_INT) small_val << bitsize)
          | (small_val & mask)))
        return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
           && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 1; i < nunits; ++i)
    {
      HOST_WIDE_INT desired_val;
      unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
      if ((i & (step - 1)) == 0)
        desired_val = val;
      else
        desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, elt))
        return false;
    }

  return true;
}
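
/* Worked examples: a V4SImode constant with every word equal to
   0x00010001 is accepted with step 1, copies 2 (a vspltish of 1 viewed as
   words), and on big-endian the V8HImode constant
   { 0, 5, 0, 5, 0, 5, 0, 5 } is accepted with step 2, copies 1 (a
   vspltisw of 5 viewed as halfword pairs).  */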

/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */

static int
vspltis_shifted (rtx op)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i, j;
  unsigned nunits;
  unsigned mask;

  HOST_WIDE_INT val;

  if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
    return 0;

  /* We need to create pseudo registers to do the shift, so don't recognize
     shift vector constants after reload.  */
  if (!can_create_pseudo_p ())
    return 0;

  nunits = GET_MODE_NUNITS (mode);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);

  /* Check if the value can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (val))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (val, inner))
    ;

  else
    return 0;

  /* Check if VAL is present in every STEP-th element until we find elements
     that are 0 or all 1 bits.  */
  for (i = 1; i < nunits; ++i)
    {
      unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
      HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);

      /* If the value isn't the splat value, check for the remaining elements
         being 0/-1.  */
      if (val != elt_val)
        {
          if (elt_val == 0)
            {
              for (j = i+1; j < nunits; ++j)
                {
                  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
                  if (const_vector_elt_as_int (op, elt2) != 0)
                    return 0;
                }

              return (nunits - i) * GET_MODE_SIZE (inner);
            }

          else if ((elt_val & mask) == mask)
            {
              for (j = i+1; j < nunits; ++j)
                {
                  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
                  if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
                    return 0;
                }

              return -((nunits - i) * GET_MODE_SIZE (inner));
            }

          else
            return 0;
        }
    }

  /* If all elements are equal, we don't need to do VSLDOI.  */
  return 0;
}
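
/* Worked example: on big-endian, the V4SImode constant { 7, 7, 0, 0 }
   splats 7 into the leading elements and then hits zeros at element 2, so
   the function returns (4 - 2) * 4 == 8, the number of zero bytes for
   VSLDOI to shift in.  */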

/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  else if (mode == V2DImode)
    {
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
          || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
        return false;

      if (zero_constant (op, mode))
        return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
          && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
        return true;

      return false;
    }

  /* V1TImode is a special container for TImode.  Ignore for now.  */
  else if (mode == V1TImode)
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  if (vspltis_shifted (op) != 0)
    return true;

  return false;
}

/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));

  gcc_unreachable ();
}

/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   to by NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */

bool
xxspltib_constant_p (rtx op,
                     machine_mode mode,
                     int *num_insns_ptr,
                     int *constant_ptr)
{
  size_t nunits = GET_MODE_NUNITS (mode);
  size_t i;
  HOST_WIDE_INT value;
  rtx element;

  /* Set the returned values to out of bound values.  */
  *num_insns_ptr = -1;
  *constant_ptr = 256;

  if (!TARGET_P9_VECTOR)
    return false;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
    return false;

  /* Handle (vec_duplicate <constant>).  */
  if (GET_CODE (op) == VEC_DUPLICATE)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
          && mode != V2DImode)
        return false;

      element = XEXP (op, 0);
      if (!CONST_INT_P (element))
        return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
        return false;
    }

  /* Handle (const_vector [...]).  */
  else if (GET_CODE (op) == CONST_VECTOR)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
          && mode != V2DImode)
        return false;

      element = CONST_VECTOR_ELT (op, 0);
      if (!CONST_INT_P (element))
        return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
        return false;

      for (i = 1; i < nunits; i++)
        {
          element = CONST_VECTOR_ELT (op, i);
          if (!CONST_INT_P (element))
            return false;

          if (value != INTVAL (element))
            return false;
        }
    }

  /* Handle integer constants being loaded into the upper part of the VSX
     register as a scalar.  If the value isn't 0/-1, only allow it if the mode
     can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
  else if (CONST_INT_P (op))
    {
      if (!SCALAR_INT_MODE_P (mode))
        return false;

      value = INTVAL (op);
      if (!IN_RANGE (value, -128, 127))
        return false;

      if (!IN_RANGE (value, -1, 0))
        {
          if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
            return false;

          if (EASY_VECTOR_15 (value))
            return false;
        }
    }

  else
    return false;

  /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
     sign extend.  Special case 0/-1 to allow getting any VSX register instead
     of an Altivec register.  */
  if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
      && EASY_VECTOR_15 (value))
    return false;

  /* Return # of instructions and the constant byte for XXSPLTIB.  */
  if (mode == V16QImode)
    *num_insns_ptr = 1;

  else if (IN_RANGE (value, -1, 0))
    *num_insns_ptr = 1;

  else
    *num_insns_ptr = 2;

  *constant_ptr = (int) value;
  return true;
}
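
/* Worked examples: splatting 100 across V16QImode needs a single
   "xxspltib %x0,100" (*num_insns_ptr == 1); splatting 100 across V8HImode
   additionally needs the sign extension step (*num_insns_ptr == 2); and
   splatting 10 across V8HImode is rejected above because EASY_VECTOR_15
   says vspltish can load it directly.  */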
const char *
output_vec_const_move (rtx *operands)
{
  int shift;
  machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
      int xxspltib_value = 256;
      int num_insns = -1;

      if (zero_constant (vec, mode))
        {
          if (TARGET_P9_VECTOR)
            return "xxspltib %x0,0";

          else if (dest_vmx_p)
            return "vspltisw %0,0";

          else
            return "xxlxor %x0,%x0,%x0";
        }

      if (all_ones_constant (vec, mode))
        {
          if (TARGET_P9_VECTOR)
            return "xxspltib %x0,255";

          else if (dest_vmx_p)
            return "vspltisw %0,-1";

          else if (TARGET_P8_VECTOR)
            return "xxlorc %x0,%x0,%x0";

          else
            gcc_unreachable ();
        }

      if (TARGET_P9_VECTOR
          && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
        {
          if (num_insns == 1)
            {
              operands[2] = GEN_INT (xxspltib_value & 0xff);
              return "xxspltib %x0,%2";
            }

          return "#";
        }
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;

      gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
      if (zero_constant (vec, mode))
        return "vspltisw %0,0";

      if (all_ones_constant (vec, mode))
        return "vspltisw %0,-1";

      /* Do we need to construct a value using VSLDOI?  */
      shift = vspltis_shifted (vec);
      if (shift != 0)
        return "#";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
        return "#";

      switch (GET_MODE (splat_vec))
        {
        case E_V4SImode:
          return "vspltisw %0,%1";

        case E_V8HImode:
          return "vspltish %0,%1";

        case E_V16QImode:
          return "vspltisb %0,%1";

        default:
          gcc_unreachable ();
        }
    }

  gcc_unreachable ();
}

/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
        ++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
                                    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}

/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
                            rtx dest, rtx op0, rtx op1,
                            rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
                 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
                 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }
}

/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
                              rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}

/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
        ++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
        {
          /* Zero register.  */
          emit_move_insn (target, CONST0_RTX (mode));
          return;
        }
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
        {
          /* Splat immediate.  */
          emit_insn (gen_rtx_SET (target, const_vec));
          return;
        }
      else
        {
          /* Load from constant pool.  */
          emit_move_insn (target, const_vec);
          return;
        }
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op[2];
      size_t num_elements = all_same ? 1 : 2;
      for (i = 0; i < (int) num_elements; i++)
        {
          op[i] = XVECEXP (vals, 0, i);
          /* Just in case there is a SUBREG with a smaller mode, do a
             conversion.  */
          if (GET_MODE (op[i]) != inner_mode)
            {
              rtx tmp = gen_reg_rtx (inner_mode);
              convert_move (tmp, op[i], 0);
              op[i] = tmp;
            }
          /* Allow load with splat double word.  */
          else if (MEM_P (op[i]))
            {
              if (!all_same)
                op[i] = force_reg (inner_mode, op[i]);
            }
          else if (!REG_P (op[i]))
            op[i] = force_reg (inner_mode, op[i]);
        }

      if (all_same)
        {
          if (mode == V2DFmode)
            emit_insn (gen_vsx_splat_v2df (target, op[0]));
          else
            emit_insn (gen_vsx_splat_v2di (target, op[0]));
        }
      else
        {
          if (mode == V2DFmode)
            emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
          else
            emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
        }
      return;
    }

  /* Special case initializing vector int if we are on 64-bit systems with
     direct move or we have the ISA 3.0 instructions.  */
  if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
      && TARGET_DIRECT_MOVE_64BIT)
    {
      if (all_same)
        {
          rtx element0 = XVECEXP (vals, 0, 0);
          if (MEM_P (element0))
            element0 = rs6000_address_for_fpconvert (element0);
          else
            element0 = force_reg (SImode, element0);

          if (TARGET_P9_VECTOR)
            emit_insn (gen_vsx_splat_v4si (target, element0));
          else
            {
              rtx tmp = gen_reg_rtx (DImode);
              emit_insn (gen_zero_extendsidi2 (tmp, element0));
              emit_insn (gen_vsx_splat_v4si_di (target, tmp));
            }
          return;
        }
      else
        {
          rtx elements[4];

          for (i = 0; i < 4; i++)
            {
              elements[i] = XVECEXP (vals, 0, i);
              if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
                elements[i] = copy_to_mode_reg (SImode, elements[i]);
            }

          emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
                                        elements[2], elements[3]));
          return;
        }
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
    {
      if (all_same)
        {
          rtx element0 = XVECEXP (vals, 0, 0);

          if (TARGET_P9_VECTOR)
            {
              if (MEM_P (element0))
                element0 = rs6000_address_for_fpconvert (element0);

              emit_insn (gen_vsx_splat_v4sf (target, element0));
            }

          else
            {
              rtx freg = gen_reg_rtx (V4SFmode);
              rtx sreg = force_reg (SFmode, element0);
              rtx cvt = (TARGET_XSCVDPSPN
                         ? gen_vsx_xscvdpspn_scalar (freg, sreg)
                         : gen_vsx_xscvdpsp_scalar (freg, sreg));

              emit_insn (cvt);
              emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
                                                      const0_rtx));
            }
        }
      else
        {
          rtx dbl_even = gen_reg_rtx (V2DFmode);
          rtx dbl_odd = gen_reg_rtx (V2DFmode);
          rtx flt_even = gen_reg_rtx (V4SFmode);
          rtx flt_odd = gen_reg_rtx (V4SFmode);
          rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
          rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
          rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
          rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

          /* Use VMRGEW if we can instead of doing a permute.  */
          if (TARGET_P8_VECTOR)
            {
              emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
              emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
              emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
              emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
              if (BYTES_BIG_ENDIAN)
                emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
              else
                emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
            }
          else
            {
              emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
              emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
              emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
              emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
              rs6000_expand_extract_even (target, flt_even, flt_odd);
            }
        }
      return;
    }

  /* Special case initializing vector short/char that are splats if we are on
     64-bit systems with direct move.  */
  if (all_same && TARGET_DIRECT_MOVE_64BIT
      && (mode == V16QImode || mode == V8HImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx di_tmp = gen_reg_rtx (DImode);

      if (!REG_P (op0))
        op0 = force_reg (GET_MODE_INNER (mode), op0);

      if (mode == V16QImode)
        {
          emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
          emit_insn (gen_vsx_vspltb_di (target, di_tmp));
          return;
        }

      if (mode == V8HImode)
        {
          emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
          emit_insn (gen_vsx_vsplth_di (target, di_tmp));
          return;
        }
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
                      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
                          gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
                                   gen_rtvec (2,
                                              gen_rtx_SET (target, mem),
                                              x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
                              gen_rtx_PARALLEL (VOIDmode,
                                                gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
         varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}

/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  val = force_reg (GET_MODE (val), val);

  if (VECTOR_MEM_VSX_P (mode))
    {
      rtx insn = NULL_RTX;
      rtx elt_rtx = GEN_INT (elt);

      if (mode == V2DFmode)
        insn = gen_vsx_set_v2df (target, target, val, elt_rtx);

      else if (mode == V2DImode)
        insn = gen_vsx_set_v2di (target, target, val, elt_rtx);

      else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
        {
          if (mode == V4SImode)
            insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
          else if (mode == V8HImode)
            insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
          else if (mode == V16QImode)
            insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
          else if (mode == V4SFmode)
            insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
        }

      if (insn)
        {
          emit_insn (insn);
          return;
        }
    }

  /* Simplify setting single element vectors like V1TImode.  */
  if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
    {
      emit_move_insn (target, gen_lowpart (mode, val));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
                      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
                               gen_rtvec (2,
                                          gen_rtx_SET (reg, mem),
                                          x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));

  if (BYTES_BIG_ENDIAN)
    x = gen_rtx_UNSPEC (mode,
                        gen_rtvec (3, target, reg,
                                   force_reg (V16QImode, x)),
                        UNSPEC_VPERM);
  else
    {
      if (TARGET_P9_VECTOR)
        x = gen_rtx_UNSPEC (mode,
                            gen_rtvec (3, target, reg,
                                       force_reg (V16QImode, x)),
                            UNSPEC_VPERMR);
      else
        {
          /* Invert selector.  We prefer to generate VNAND on P8 so
             that future fusion opportunities can kick in, but must
             generate VNOR elsewhere.  */
          rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
          rtx iorx = (TARGET_P8_VECTOR
                      ? gen_rtx_IOR (V16QImode, notx, notx)
                      : gen_rtx_AND (V16QImode, notx, notx));
          rtx tmp = gen_reg_rtx (V16QImode);
          emit_insn (gen_rtx_SET (tmp, iorx));

          /* Permute with operands reversed and adjusted selector.  */
          x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
                              UNSPEC_VPERM);
        }
    }

  emit_insn (gen_rtx_SET (target, x));
}

/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
{
  machine_mode mode = GET_MODE (vec);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
    {
      switch (mode)
        {
        default:
          break;
        case E_V1TImode:
          gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
          emit_move_insn (target, gen_lowpart (TImode, vec));
          return;
        case E_V2DFmode:
          emit_insn (gen_vsx_extract_v2df (target, vec, elt));
          return;
        case E_V2DImode:
          emit_insn (gen_vsx_extract_v2di (target, vec, elt));
          return;
        case E_V4SFmode:
          emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
          return;
        case E_V16QImode:
          if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
              return;
            }
          else
            break;
        case E_V8HImode:
          if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
              return;
            }
          else
            break;
        case E_V4SImode:
          if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v4si (target, vec, elt));
              return;
            }
          break;
        }
    }
  else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
           && TARGET_DIRECT_MOVE_64BIT)
    {
      if (GET_MODE (elt) != DImode)
        {
          rtx tmp = gen_reg_rtx (DImode);
          convert_move (tmp, elt, 0);
          elt = tmp;
        }
      else if (!REG_P (elt))
        elt = force_reg (DImode, elt);

      switch (mode)
        {
        case E_V2DFmode:
          emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
          return;

        case E_V2DImode:
          emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
          return;

        case E_V4SFmode:
          emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
          return;

        case E_V4SImode:
          emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
          return;

        case E_V8HImode:
          emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
          return;

        case E_V16QImode:
          emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
          return;

        default:
          gcc_unreachable ();
        }
    }

  gcc_assert (CONST_INT_P (elt));

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode,
                           INTVAL (elt) * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}

/* Helper function to return the register number of a RTX.  */

static int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}

/* Adjust a memory address (MEM) of a vector type to point to a scalar field
   within the vector (ELEMENT) with a mode (SCALAR_MODE).  Use a base register
   temporary (BASE_TMP) to fixup the address.  Return the new memory address
   that is valid for reads or writes to a given register (SCALAR_REG).  */

static rtx
rs6000_adjust_vec_address (rtx scalar_reg,
                           rtx mem,
                           rtx element,
                           rtx base_tmp,
                           machine_mode scalar_mode)
{
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  rtx addr = XEXP (mem, 0);
  rtx element_offset;
  rtx new_addr;
  bool valid_addr_p;

  /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY.  */
  gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);

  /* Calculate what we need to add to the address to get the element
     address.  */
  if (CONST_INT_P (element))
    element_offset = GEN_INT (INTVAL (element) * scalar_size);
  else
    {
      int byte_shift = exact_log2 (scalar_size);
      gcc_assert (byte_shift >= 0);

      if (byte_shift == 0)
        element_offset = element;

      else
        {
          if (TARGET_POWERPC64)
            emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
          else
            emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));

          element_offset = base_tmp;
        }
    }

  /* Create the new address pointing to the element within the vector.  If we
     are adding 0, we don't have to change the address.  */
  if (element_offset == const0_rtx)
    new_addr = addr;

  /* A simple indirect address can be converted into a reg + offset
     address.  */
  else if (REG_P (addr) || SUBREG_P (addr))
    new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);

  /* Optimize D-FORM addresses with constant offset with a constant element, to
     include the element offset in the address directly.  */
  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      rtx insn;

      gcc_assert (REG_P (op0) || SUBREG_P (op0));
      if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
        {
          HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
          rtx offset_rtx = GEN_INT (offset);

          if (IN_RANGE (offset, -32768, 32767)
              && (scalar_size < 8 || (offset & 0x3) == 0))
            new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
          else
            {
              emit_move_insn (base_tmp, offset_rtx);
              new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
            }
        }
      else
        {
          bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
          bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));

          /* Note, ADDI requires the register being added to be a base
             register.  If the register was R0, load it up into the temporary
             and do the add.  */
          if (op1_reg_p
              && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
            {
              insn = gen_add3_insn (base_tmp, op1, element_offset);
              gcc_assert (insn != NULL_RTX);
              emit_insn (insn);
            }

          else if (ele_reg_p
                   && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
            {
              insn = gen_add3_insn (base_tmp, element_offset, op1);
              gcc_assert (insn != NULL_RTX);
              emit_insn (insn);
            }

          else
            {
              emit_move_insn (base_tmp, op1);
              emit_insn (gen_add2_insn (base_tmp, element_offset));
            }

          new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
        }
    }

  else
    {
      emit_move_insn (base_tmp, addr);
      new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
    }

  /* If we have a PLUS, we need to see whether the particular register class
     allows for D-FORM or X-FORM addressing.  */
  if (GET_CODE (new_addr) == PLUS)
    {
      rtx op1 = XEXP (new_addr, 1);
      addr_mask_type addr_mask;
      int scalar_regno = regno_or_subregno (scalar_reg);

      gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
      if (INT_REGNO_P (scalar_regno))
        addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];

      else if (FP_REGNO_P (scalar_regno))
        addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];

      else if (ALTIVEC_REGNO_P (scalar_regno))
        addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];

      else
        gcc_unreachable ();

      if (REG_P (op1) || SUBREG_P (op1))
        valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
      else
        valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
    }

  else if (REG_P (new_addr) || SUBREG_P (new_addr))
    valid_addr_p = true;

  else
    valid_addr_p = false;

  if (!valid_addr_p)
    {
      emit_move_insn (base_tmp, new_addr);
      new_addr = base_tmp;
    }

  return change_address (mem, scalar_mode, new_addr);
}
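
/* Worked example: extracting element 2 of a V4SImode vector held at
   (mem (plus reg 16)) gives element_offset == 8, which is folded into the
   existing displacement to form (plus reg 24); that still fits the signed
   16-bit D-FORM range, so no temporary is needed.  */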

/* Split a variable vec_extract operation into the component instructions.  */

void
rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
                              rtx tmp_altivec)
{
  machine_mode mode = GET_MODE (src);
  machine_mode scalar_mode = GET_MODE (dest);
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  int byte_shift = exact_log2 (scalar_size);

  gcc_assert (byte_shift >= 0);

  /* If we are given a memory address, optimize to load just the element.  We
     don't have to adjust the vector element number on little endian
     systems.  */
  if (MEM_P (src))
    {
      gcc_assert (REG_P (tmp_gpr));
      emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
                                                       tmp_gpr, scalar_mode));
      return;
    }

  else if (REG_P (src) || SUBREG_P (src))
    {
      int bit_shift = byte_shift + 3;
      rtx element2;
      int dest_regno = regno_or_subregno (dest);
      int src_regno = regno_or_subregno (src);
      int element_regno = regno_or_subregno (element);

      gcc_assert (REG_P (tmp_gpr));

      /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
         a general purpose register.  */
      if (TARGET_P9_VECTOR
          && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
          && INT_REGNO_P (dest_regno)
          && ALTIVEC_REGNO_P (src_regno)
          && INT_REGNO_P (element_regno))
        {
          rtx dest_si = gen_rtx_REG (SImode, dest_regno);
          rtx element_si = gen_rtx_REG (SImode, element_regno);

          if (mode == V16QImode)
            emit_insn (VECTOR_ELT_ORDER_BIG
                       ? gen_vextublx (dest_si, element_si, src)
                       : gen_vextubrx (dest_si, element_si, src));

          else if (mode == V8HImode)
            {
              rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
              emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
              emit_insn (VECTOR_ELT_ORDER_BIG
                         ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
                         : gen_vextuhrx (dest_si, tmp_gpr_si, src));
            }

          else
            {
              rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
              emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
              emit_insn (VECTOR_ELT_ORDER_BIG
                         ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
                         : gen_vextuwrx (dest_si, tmp_gpr_si, src));
            }

          return;
        }

      gcc_assert (REG_P (tmp_altivec));

      /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
         an XOR, otherwise we need to subtract.  The shift amount is so VSLO
         will shift the element into the upper position (adding 3 to convert a
         byte shift into a bit shift).  */
      if (scalar_size == 8)
        {
          if (!VECTOR_ELT_ORDER_BIG)
            {
              emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
              element2 = tmp_gpr;
            }
          else
            element2 = element;

          /* Generate RLDIC directly to shift left 6 bits and retrieve 1
             bit.  */
          emit_insn (gen_rtx_SET (tmp_gpr,
                                  gen_rtx_AND (DImode,
                                               gen_rtx_ASHIFT (DImode,
                                                               element2,
                                                               GEN_INT (6)),
                                               GEN_INT (64))));
        }
      else
        {
          if (!VECTOR_ELT_ORDER_BIG)
            {
              rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);

              emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
              emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
              element2 = tmp_gpr;
            }
          else
            element2 = element;

          emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
        }

      /* Get the value into the lower byte of the Altivec register where VSLO
         expects it.  */
      if (TARGET_P9_VECTOR)
        emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
      else if (can_create_pseudo_p ())
        emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
      else
        {
          rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
          emit_move_insn (tmp_di, tmp_gpr);
          emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
        }

      /* Do the VSLO to get the value into the final location.  */
      switch (mode)
        {
        case E_V2DFmode:
          emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
          return;

        case E_V2DImode:
          emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
          return;

        case E_V4SFmode:
          {
            rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
            rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
            rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
            emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
                                          tmp_altivec));

            emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
            return;
          }

        case E_V4SImode:
        case E_V8HImode:
        case E_V16QImode:
          {
            rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
            rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
            rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
            emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
                                          tmp_altivec));
            emit_move_insn (tmp_gpr_di, tmp_altivec_di);
            emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
                                    GEN_INT (64 - (8 * scalar_size))));
            return;
          }

        default:
          gcc_unreachable ();
        }
    }
  else
    gcc_unreachable ();
}

/* Helper function for rs6000_split_v4si_init to build up a DImode value from
   two SImode values.  */

static void
rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
{
  const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);

  /* If both elements are constants, emit the combined constant directly.  */
  if (CONST_INT_P (si1) && CONST_INT_P (si2))
    {
      unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
      unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;

      emit_move_insn (dest, GEN_INT (const1 | const2));
      return;
    }

  /* Put si1 into upper 32-bits of dest.  */
  if (CONST_INT_P (si1))
    emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
  else
    {
      /* Generate RLDIC.  */
      rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
      rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
      rtx mask_rtx = GEN_INT (mask_32bit << 32);
      rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
      gcc_assert (!reg_overlap_mentioned_p (dest, si1));
      emit_insn (gen_rtx_SET (dest, and_rtx));
    }

  /* Put si2 into the temporary.  */
  gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
  if (CONST_INT_P (si2))
    emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
  else
    emit_insn (gen_zero_extendsidi2 (tmp, si2));

  /* Combine the two parts.  */
  emit_insn (gen_iordi3 (dest, dest, tmp));
}
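
/* Worked example: with si1 == 1 and si2 == 2 both constant, the combined
   doubleword is (1 << 32) | 2 == 0x100000002, emitted as a single move
   instead of the shift/zero-extend/or sequence above.  */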

/* Split a V4SI initialization.  */

void
rs6000_split_v4si_init (rtx operands[])
{
  rtx dest = operands[0];

  /* Destination is a GPR, build up the two DImode parts in place.  */
  if (REG_P (dest) || SUBREG_P (dest))
    {
      int d_regno = regno_or_subregno (dest);
      rtx scalar1 = operands[1];
      rtx scalar2 = operands[2];
      rtx scalar3 = operands[3];
      rtx scalar4 = operands[4];
      rtx tmp1 = operands[5];
      rtx tmp2 = operands[6];

      /* Even though we only need one temporary (plus the destination, which
         has an early clobber constraint), try to use two temporaries, one for
         each double word created.  That way the 2nd insn scheduling pass can
         rearrange things so the two parts are done in parallel.  */
      if (BYTES_BIG_ENDIAN)
        {
          rtx di_lo = gen_rtx_REG (DImode, d_regno);
          rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
          rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
          rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
        }
      else
        {
          rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
          rtx di_hi = gen_rtx_REG (DImode, d_regno);
          gcc_assert (!VECTOR_ELT_ORDER_BIG);
          rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
          rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
        }
      return;
    }

  else
    gcc_unreachable ();
}

/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is abi mandated, optional, or
   both abi and optional alignment.  */

unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
            {
              if (align < 64)
                align = 64;
            }
          else if (align < 128)
            align = 128;
        }
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
          && TYPE_MODE (TREE_TYPE (type)) == QImode)
        {
          if (align < BITS_PER_WORD)
            align = BITS_PER_WORD;
        }
    }

  return align;
}
/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  Altivec vector memory
   instructions simply ignore the low bits; VSX memory instructions
   are aligned to 4 or 8 bytes.  */

static bool
rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
{
  return (STRICT_ALIGNMENT
	  || (!TARGET_EFFICIENT_UNALIGNED_VSX
	      && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
		  || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
		      && (int) align < VECTOR_ALIGN (mode)))));
}
/* Previous GCC releases forced all vector types to have 16-byte alignment.  */

bool
rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
{
  if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
    {
      if (computed != 128)
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the layout of aggregates containing vectors with"
		      " %d-byte alignment has changed in GCC 5",
		      computed / BITS_PER_UNIT);
	    }
	}
      /* In current GCC there is no special case.  */
      return false;
    }

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
	return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
	return true;
    }

  return false;
}
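/* Illustrative note (not from the original comments): with ISA 2.07 direct
   moves enabled, an operand pair such as (reg:DI 3) and (reg:DI 32) -- a GPR
   paired with an FPR/VSX register -- satisfies direct_move_p, while a pair
   of two GPRs or two VSX registers does not.  */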
/* Return true if the OFFSET is valid for the quad address instructions that
   use d-form (register + offset) addressing.  */

bool
quad_address_offset_p (HOST_WIDE_INT offset)
{
  return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
}
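/* Illustrative note (not from the original comments): the offset must be a
   16-bit signed displacement that is also a multiple of 16, so offsets such
   as 0, 16 or -32768 are accepted, while 8 (misaligned) or 32768 (out of
   range) are rejected.  */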
/* Return true if the ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers, or
   LXV/STXV for vector registers under ISA 3.0).  GPR_P is true if this
   address is intended for LQ/STQ.  If it is false, the address is intended
   for the ISA 3.0 LXV/STXV instruction.  */

bool
quad_address_p (rtx addr, machine_mode mode, bool strict)
{
  rtx op0, op1;

  if (GET_MODE_SIZE (mode) != 16)
    return false;

  if (legitimate_indirect_address_p (addr, strict))
    return true;

  if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
    return false;

  if (GET_CODE (addr) != PLUS)
    return false;

  op0 = XEXP (addr, 0);
  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;

  op1 = XEXP (addr, 1);
  if (!CONST_INT_P (op1))
    return false;

  return quad_address_offset_p (INTVAL (op1));
}
/* Return true if this is a load or store quad operation.  This function does
   not handle the atomic quad memory instructions.  */

bool
quad_load_store_p (rtx op0, rtx op1)
{
  bool ret;

  if (!TARGET_QUAD_MEMORY)
    ret = false;

  else if (REG_P (op0) && MEM_P (op1))
    ret = (quad_int_reg_operand (op0, GET_MODE (op0))
	   && quad_memory_operand (op1, GET_MODE (op1))
	   && !reg_overlap_mentioned_p (op0, op1));

  else if (MEM_P (op0) && REG_P (op1))
    ret = (quad_memory_operand (op0, GET_MODE (op0))
	   && quad_int_reg_operand (op1, GET_MODE (op1)));

  else
    ret = false;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n========== quad_load_store, return %s\n",
	       ret ? "true" : "false");
      debug_rtx (gen_rtx_SET (op0, op1));
    }

  return ret;
}
/* Given an address, return a constant offset term if one exists.  */

rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
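/* Illustrative note (not from the original comments): for an address such as
   (plus (reg) (const_int 8)) this returns (const_int 8), while for a plain
   (reg) address it returns NULL_RTX, so callers can test whether a constant
   offset term is present before inspecting it.  */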
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
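/* Illustrative note (not from the original comments): the final test folds
   both ends of the signed 16-bit range into one unsigned comparison.  For a
   DImode access on powerpc64 (extra = 0), offset = 32764 gives
   32764 + 0x8000 = 0xfffc < 0x10000 and is accepted, while offset = 32768
   gives exactly 0x10000 and is rejected.  */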
/* As above, but for DS-FORM VSX insns.  Unlike mem_operand_gpr,
   enforce an offset divisible by 4 even for 32-bit.  */

bool
mem_operand_ds_form (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  if (!offsettable_address_p (false, mode, addr))
    return false;

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if ((offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
8230 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8233 reg_offset_addressing_ok_p (machine_mode mode
)
8247 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8248 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8249 a vector mode, if we want to use the VSX registers to move it around,
8250 we need to restrict ourselves to reg+reg addressing. Similarly for
8251 IEEE 128-bit floating point that is passed in a single vector
8253 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
))
8254 return mode_supports_vsx_dform_quad (mode
);
8259 /* Paired vector modes. Only reg+reg addressing is valid. */
8260 if (TARGET_PAIRED_FLOAT
)
8265 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8266 addressing for the LFIWZX and STFIWX instructions. */
8267 if (TARGET_NO_SDMODE_STACK
)
static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if a MODE sized memory access to OP plus OFFSET
   is known to not straddle a 32k boundary.  This function is used
   to determine whether -mcmodel=medium code can use TOC pointer
   relative addressing for OP.  This means the alignment of the TOC
   pointer must also be taken into account, and unfortunately that is
   only 8 bytes.  */

#ifndef POWERPC64_TOC_POINTER_ALIGNMENT
#define POWERPC64_TOC_POINTER_ALIGNMENT 8
#endif
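/* Illustrative note (not from the original comments): because only
   POWERPC64_TOC_POINTER_ALIGNMENT (8) bytes of TOC pointer alignment can be
   assumed, offsettable_ok_by_alignment below caps the alignment it derives
   for any symbol at 8 bytes, so an access can only be proven not to cross a
   32k boundary when its size fits within that capped alignment.  */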
8310 offsettable_ok_by_alignment (rtx op
, HOST_WIDE_INT offset
,
8314 unsigned HOST_WIDE_INT dsize
, dalign
, lsb
, mask
;
8316 if (GET_CODE (op
) != SYMBOL_REF
)
8319 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8321 if (mode_supports_vsx_dform_quad (mode
))
8324 dsize
= GET_MODE_SIZE (mode
);
8325 decl
= SYMBOL_REF_DECL (op
);
8331 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8332 replacing memory addresses with an anchor plus offset. We
8333 could find the decl by rummaging around in the block->objects
8334 VEC for the given offset but that seems like too much work. */
8335 dalign
= BITS_PER_UNIT
;
8336 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op
)
8337 && SYMBOL_REF_ANCHOR_P (op
)
8338 && SYMBOL_REF_BLOCK (op
) != NULL
)
8340 struct object_block
*block
= SYMBOL_REF_BLOCK (op
);
8342 dalign
= block
->alignment
;
8343 offset
+= SYMBOL_REF_BLOCK_OFFSET (op
);
8345 else if (CONSTANT_POOL_ADDRESS_P (op
))
8347 /* It would be nice to have get_pool_align().. */
8348 machine_mode cmode
= get_pool_mode (op
);
8350 dalign
= GET_MODE_ALIGNMENT (cmode
);
8353 else if (DECL_P (decl
))
8355 dalign
= DECL_ALIGN (decl
);
8359 /* Allow BLKmode when the entire object is known to not
8360 cross a 32k boundary. */
8361 if (!DECL_SIZE_UNIT (decl
))
8364 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl
)))
8367 dsize
= tree_to_uhwi (DECL_SIZE_UNIT (decl
));
8371 dalign
/= BITS_PER_UNIT
;
8372 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8373 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8374 return dalign
>= dsize
;
8380 /* Find how many bits of the alignment we know for this access. */
8381 dalign
/= BITS_PER_UNIT
;
8382 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8383 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8385 lsb
= offset
& -offset
;
8389 return dalign
>= dsize
;
8393 constant_pool_expr_p (rtx op
)
8397 split_const (op
, &base
, &offset
);
8398 return (GET_CODE (base
) == SYMBOL_REF
8399 && CONSTANT_POOL_ADDRESS_P (base
)
8400 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base
), Pmode
));
8403 /* These are only used to pass through from print_operand/print_operand_address
8404 to rs6000_output_addr_const_extra over the intervening function
8405 output_addr_const which is not target code. */
8406 static const_rtx tocrel_base_oac
, tocrel_offset_oac
;
8408 /* Return true if OP is a toc pointer relative address (the output
8409 of create_TOC_reference). If STRICT, do not match non-split
8410 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8411 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8412 TOCREL_OFFSET_RET respectively. */
8415 toc_relative_expr_p (const_rtx op
, bool strict
, const_rtx
*tocrel_base_ret
,
8416 const_rtx
*tocrel_offset_ret
)
8421 if (TARGET_CMODEL
!= CMODEL_SMALL
)
8423 /* When strict ensure we have everything tidy. */
8425 && !(GET_CODE (op
) == LO_SUM
8426 && REG_P (XEXP (op
, 0))
8427 && INT_REG_OK_FOR_BASE_P (XEXP (op
, 0), strict
)))
8430 /* When not strict, allow non-split TOC addresses and also allow
8431 (lo_sum (high ..)) TOC addresses created during reload. */
8432 if (GET_CODE (op
) == LO_SUM
)
8436 const_rtx tocrel_base
= op
;
8437 const_rtx tocrel_offset
= const0_rtx
;
8439 if (GET_CODE (op
) == PLUS
&& add_cint_operand (XEXP (op
, 1), GET_MODE (op
)))
8441 tocrel_base
= XEXP (op
, 0);
8442 tocrel_offset
= XEXP (op
, 1);
8445 if (tocrel_base_ret
)
8446 *tocrel_base_ret
= tocrel_base
;
8447 if (tocrel_offset_ret
)
8448 *tocrel_offset_ret
= tocrel_offset
;
8450 return (GET_CODE (tocrel_base
) == UNSPEC
8451 && XINT (tocrel_base
, 1) == UNSPEC_TOCREL
);
8454 /* Return true if X is a constant pool address, and also for cmodel=medium
8455 if X is a toc-relative address known to be offsettable within MODE. */
8458 legitimate_constant_pool_address_p (const_rtx x
, machine_mode mode
,
8461 const_rtx tocrel_base
, tocrel_offset
;
8462 return (toc_relative_expr_p (x
, strict
, &tocrel_base
, &tocrel_offset
)
8463 && (TARGET_CMODEL
!= CMODEL_MEDIUM
8464 || constant_pool_expr_p (XVECEXP (tocrel_base
, 0, 0))
8466 || offsettable_ok_by_alignment (XVECEXP (tocrel_base
, 0, 0),
8467 INTVAL (tocrel_offset
), mode
)));
8471 legitimate_small_data_p (machine_mode mode
, rtx x
)
8473 return (DEFAULT_ABI
== ABI_V4
8474 && !flag_pic
&& !TARGET_TOC
8475 && (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
)
8476 && small_data_operand (x
, mode
));
8480 rs6000_legitimate_offset_address_p (machine_mode mode
, rtx x
,
8481 bool strict
, bool worst_case
)
8483 unsigned HOST_WIDE_INT offset
;
8486 if (GET_CODE (x
) != PLUS
)
8488 if (!REG_P (XEXP (x
, 0)))
8490 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8492 if (mode_supports_vsx_dform_quad (mode
))
8493 return quad_address_p (x
, mode
, strict
);
8494 if (!reg_offset_addressing_ok_p (mode
))
8495 return virtual_stack_registers_memory_p (x
);
8496 if (legitimate_constant_pool_address_p (x
, mode
, strict
|| lra_in_progress
))
8498 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
8501 offset
= INTVAL (XEXP (x
, 1));
8507 /* Paired single modes: offset addressing isn't valid. */
8513 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8515 if (VECTOR_MEM_VSX_P (mode
))
8520 if (!TARGET_POWERPC64
)
8522 else if (offset
& 3)
8535 if (!TARGET_POWERPC64
)
8537 else if (offset
& 3)
8546 return offset
< 0x10000 - extra
;
8550 legitimate_indexed_address_p (rtx x
, int strict
)
8554 if (GET_CODE (x
) != PLUS
)
8560 return (REG_P (op0
) && REG_P (op1
)
8561 && ((INT_REG_OK_FOR_BASE_P (op0
, strict
)
8562 && INT_REG_OK_FOR_INDEX_P (op1
, strict
))
8563 || (INT_REG_OK_FOR_BASE_P (op1
, strict
)
8564 && INT_REG_OK_FOR_INDEX_P (op0
, strict
))));
bool
avoiding_indexed_address_p (machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}

bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
8582 macho_lo_sum_memory_operand (rtx x
, machine_mode mode
)
8584 if (!TARGET_MACHO
|| !flag_pic
8585 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
8589 if (GET_CODE (x
) != LO_SUM
)
8591 if (GET_CODE (XEXP (x
, 0)) != REG
)
8593 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
8597 return CONSTANT_P (x
);
8601 legitimate_lo_sum_address_p (machine_mode mode
, rtx x
, int strict
)
8603 if (GET_CODE (x
) != LO_SUM
)
8605 if (GET_CODE (XEXP (x
, 0)) != REG
)
8607 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8609 /* quad word addresses are restricted, and we can't use LO_SUM. */
8610 if (mode_supports_vsx_dform_quad (mode
))
8614 if (TARGET_ELF
|| TARGET_MACHO
)
8618 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
)
8620 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8621 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8622 recognizes some LO_SUM addresses as valid although this
8623 function says opposite. In most cases, LRA through different
8624 transformations can generate correct code for address reloads.
8625 It can not manage only some LO_SUM cases. So we need to add
8626 code analogous to one in rs6000_legitimize_reload_address for
8627 LOW_SUM here saying that some addresses are still valid. */
8628 large_toc_ok
= (lra_in_progress
&& TARGET_CMODEL
!= CMODEL_SMALL
8629 && small_toc_ref (x
, VOIDmode
));
8630 if (TARGET_TOC
&& ! large_toc_ok
)
8632 if (GET_MODE_NUNITS (mode
) != 1)
8634 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
8635 && !(/* ??? Assume floating point reg based on mode? */
8636 TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
8637 && (mode
== DFmode
|| mode
== DDmode
)))
8640 return CONSTANT_P (x
) || large_toc_ok
;
8647 /* Try machine-dependent ways of modifying an illegitimate address
8648 to be legitimate. If we find one, return the new, valid address.
8649 This is used from only one place: `memory_address' in explow.c.
8651 OLDX is the address as it was before break_out_memory_refs was
8652 called. In some cases it is useful to look at this to decide what
8655 It is always safe for this function to do nothing. It exists to
8656 recognize opportunities to optimize the output.
8658 On RS/6000, first check for the sum of a register with a constant
8659 integer that is out of range. If so, generate code to add the
8660 constant with the low-order 16 bits masked to the register and force
8661 this result into another register (this can be done with `cau').
8662 Then generate an address of REG+(CONST&0xffff), allowing for the
8663 possibility of bit 16 being a one.
8665 Then check for the sum of a register and something not constant, try to
8666 load the other things into a register and return the sum. */
8669 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
8674 if (!reg_offset_addressing_ok_p (mode
)
8675 || mode_supports_vsx_dform_quad (mode
))
8677 if (virtual_stack_registers_memory_p (x
))
8680 /* In theory we should not be seeing addresses of the form reg+0,
8681 but just in case it is generated, optimize it away. */
8682 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
8683 return force_reg (Pmode
, XEXP (x
, 0));
8685 /* For TImode with load/store quad, restrict addresses to just a single
8686 pointer, so it works with both GPRs and VSX registers. */
8687 /* Make sure both operands are registers. */
8688 else if (GET_CODE (x
) == PLUS
8689 && (mode
!= TImode
|| !TARGET_VSX
))
8690 return gen_rtx_PLUS (Pmode
,
8691 force_reg (Pmode
, XEXP (x
, 0)),
8692 force_reg (Pmode
, XEXP (x
, 1)));
8694 return force_reg (Pmode
, x
);
8696 if (GET_CODE (x
) == SYMBOL_REF
)
8698 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
8700 return rs6000_legitimize_tls_address (x
, model
);
8712 /* As in legitimate_offset_address_p we do not assume
8713 worst-case. The mode here is just a hint as to the registers
8714 used. A TImode is usually in gprs, but may actually be in
8715 fprs. Leave worst-case scenario for reload to handle via
8716 insn constraints. PTImode is only GPRs. */
8723 if (GET_CODE (x
) == PLUS
8724 && GET_CODE (XEXP (x
, 0)) == REG
8725 && GET_CODE (XEXP (x
, 1)) == CONST_INT
8726 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
8728 && !PAIRED_VECTOR_MODE (mode
))
8730 HOST_WIDE_INT high_int
, low_int
;
8732 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8733 if (low_int
>= 0x8000 - extra
)
8735 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
8736 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8737 GEN_INT (high_int
)), 0);
8738 return plus_constant (Pmode
, sum
, low_int
);
8740 else if (GET_CODE (x
) == PLUS
8741 && GET_CODE (XEXP (x
, 0)) == REG
8742 && GET_CODE (XEXP (x
, 1)) != CONST_INT
8743 && GET_MODE_NUNITS (mode
) == 1
8744 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8745 || (/* ??? Assume floating point reg based on mode? */
8746 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8747 && (mode
== DFmode
|| mode
== DDmode
)))
8748 && !avoiding_indexed_address_p (mode
))
8750 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8751 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
8753 else if (PAIRED_VECTOR_MODE (mode
))
8757 /* We accept [reg + reg]. */
8759 if (GET_CODE (x
) == PLUS
)
8761 rtx op1
= XEXP (x
, 0);
8762 rtx op2
= XEXP (x
, 1);
8765 op1
= force_reg (Pmode
, op1
);
8766 op2
= force_reg (Pmode
, op2
);
8768 /* We can't always do [reg + reg] for these, because [reg +
8769 reg + offset] is not a legitimate addressing mode. */
8770 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
8772 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
8773 return force_reg (Pmode
, y
);
8778 return force_reg (Pmode
, x
);
8780 else if ((TARGET_ELF
8782 || !MACHO_DYNAMIC_NO_PIC_P
8788 && GET_CODE (x
) != CONST_INT
8789 && GET_CODE (x
) != CONST_WIDE_INT
8790 && GET_CODE (x
) != CONST_DOUBLE
8792 && GET_MODE_NUNITS (mode
) == 1
8793 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8794 || (/* ??? Assume floating point reg based on mode? */
8795 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8796 && (mode
== DFmode
|| mode
== DDmode
))))
8798 rtx reg
= gen_reg_rtx (Pmode
);
8800 emit_insn (gen_elf_high (reg
, x
));
8802 emit_insn (gen_macho_high (reg
, x
));
8803 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
8806 && GET_CODE (x
) == SYMBOL_REF
8807 && constant_pool_expr_p (x
)
8808 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
8809 return create_TOC_reference (x
, NULL_RTX
);
8814 /* Debug version of rs6000_legitimize_address. */
8816 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
8822 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
8823 insns
= get_insns ();
8829 "\nrs6000_legitimize_address: mode %s, old code %s, "
8830 "new code %s, modified\n",
8831 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
8832 GET_RTX_NAME (GET_CODE (ret
)));
8834 fprintf (stderr
, "Original address:\n");
8837 fprintf (stderr
, "oldx:\n");
8840 fprintf (stderr
, "New address:\n");
8845 fprintf (stderr
, "Insns added:\n");
8846 debug_rtx_list (insns
, 20);
8852 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8853 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
8864 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8865 We need to emit DTP-relative relocations. */
8867 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
8869 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8874 fputs ("\t.long\t", file
);
8877 fputs (DOUBLE_INT_ASM_OP
, file
);
8882 output_addr_const (file
, x
);
8884 fputs ("@dtprel+0x8000", file
);
8885 else if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
)
8887 switch (SYMBOL_REF_TLS_MODEL (x
))
8891 case TLS_MODEL_LOCAL_EXEC
:
8892 fputs ("@le", file
);
8894 case TLS_MODEL_INITIAL_EXEC
:
8895 fputs ("@ie", file
);
8897 case TLS_MODEL_GLOBAL_DYNAMIC
:
8898 case TLS_MODEL_LOCAL_DYNAMIC
:
/* Return true if X is a symbol that refers to real (rather than emulated)
   TLS.  */

static bool
rs6000_real_tls_symbol_ref_p (rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
}
8917 /* In the name of slightly smaller debug output, and to cater to
8918 general assembler lossage, recognize various UNSPEC sequences
8919 and turn them back into a direct symbol reference. */
8922 rs6000_delegitimize_address (rtx orig_x
)
8926 orig_x
= delegitimize_mem_from_attrs (orig_x
);
8932 if (TARGET_CMODEL
!= CMODEL_SMALL
8933 && GET_CODE (y
) == LO_SUM
)
8937 if (GET_CODE (y
) == PLUS
8938 && GET_MODE (y
) == Pmode
8939 && CONST_INT_P (XEXP (y
, 1)))
8941 offset
= XEXP (y
, 1);
8945 if (GET_CODE (y
) == UNSPEC
8946 && XINT (y
, 1) == UNSPEC_TOCREL
)
8948 y
= XVECEXP (y
, 0, 0);
8951 /* Do not associate thread-local symbols with the original
8952 constant pool symbol. */
8954 && GET_CODE (y
) == SYMBOL_REF
8955 && CONSTANT_POOL_ADDRESS_P (y
)
8956 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
8960 if (offset
!= NULL_RTX
)
8961 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
8962 if (!MEM_P (orig_x
))
8965 return replace_equiv_address_nv (orig_x
, y
);
8969 && GET_CODE (orig_x
) == LO_SUM
8970 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
8972 y
= XEXP (XEXP (orig_x
, 1), 0);
8973 if (GET_CODE (y
) == UNSPEC
8974 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
8975 return XVECEXP (y
, 0, 0);
8981 /* Return true if X shouldn't be emitted into the debug info.
8982 The linker doesn't like .toc section references from
8983 .debug_* sections, so reject .toc section symbols. */
8986 rs6000_const_not_ok_for_debug_p (rtx x
)
8988 if (GET_CODE (x
) == SYMBOL_REF
8989 && CONSTANT_POOL_ADDRESS_P (x
))
8991 rtx c
= get_pool_constant (x
);
8992 machine_mode cmode
= get_pool_mode (x
);
8993 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
9001 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9004 rs6000_legitimate_combined_insn (rtx_insn
*insn
)
9006 int icode
= INSN_CODE (insn
);
9008 /* Reject creating doloop insns. Combine should not be allowed
9009 to create these for a number of reasons:
9010 1) In a nested loop, if combine creates one of these in an
9011 outer loop and the register allocator happens to allocate ctr
9012 to the outer loop insn, then the inner loop can't use ctr.
9013 Inner loops ought to be more highly optimized.
9014 2) Combine often wants to create one of these from what was
9015 originally a three insn sequence, first combining the three
9016 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9017 allocated ctr, the splitter takes use back to the three insn
9018 sequence. It's better to stop combine at the two insn
9020 3) Faced with not being able to allocate ctr for ctrsi/crtdi
9021 insns, the register allocator sometimes uses floating point
9022 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9023 jump insn and output reloads are not implemented for jumps,
9024 the ctrsi/ctrdi splitters need to handle all possible cases.
9025 That's a pain, and it gets to be seriously difficult when a
9026 splitter that runs after reload needs memory to transfer from
9027 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9028 for the difficult case. It's better to not create problems
9029 in the first place. */
9030 if (icode
!= CODE_FOR_nothing
9031 && (icode
== CODE_FOR_ctrsi_internal1
9032 || icode
== CODE_FOR_ctrdi_internal1
9033 || icode
== CODE_FOR_ctrsi_internal2
9034 || icode
== CODE_FOR_ctrdi_internal2
))
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;

static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;

static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
9068 /* AIX Thread-Local Address support. */
9071 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
9073 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
9077 name
= XSTR (addr
, 0);
9078 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9079 or the symbol will be in TLS private data section. */
9080 if (name
[strlen (name
) - 1] != ']'
9081 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
9082 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
9084 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
9085 strcpy (tlsname
, name
);
9087 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
9088 tlsaddr
= copy_rtx (addr
);
9089 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
9094 /* Place addr into TOC constant pool. */
9095 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
9097 /* Output the TOC entry and create the MEM referencing the value. */
9098 if (constant_pool_expr_p (XEXP (sym
, 0))
9099 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
9101 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
9102 mem
= gen_const_mem (Pmode
, tocref
);
9103 set_mem_alias_set (mem
, get_TOC_alias_set ());
9108 /* Use global-dynamic for local-dynamic. */
9109 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
9110 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
9112 /* Create new TOC reference for @m symbol. */
9113 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
9114 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
9115 strcpy (tlsname
, "*LCM");
9116 strcat (tlsname
, name
+ 3);
9117 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
9118 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
9119 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
9120 rtx modmem
= gen_const_mem (Pmode
, tocref
);
9121 set_mem_alias_set (modmem
, get_TOC_alias_set ());
9123 rtx modreg
= gen_reg_rtx (Pmode
);
9124 emit_insn (gen_rtx_SET (modreg
, modmem
));
9126 tmpreg
= gen_reg_rtx (Pmode
);
9127 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9129 dest
= gen_reg_rtx (Pmode
);
9131 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
9133 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
9136 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9137 else if (TARGET_32BIT
)
9139 tlsreg
= gen_reg_rtx (SImode
);
9140 emit_insn (gen_tls_get_tpointer (tlsreg
));
9143 tlsreg
= gen_rtx_REG (DImode
, 13);
9145 /* Load the TOC value into temporary register. */
9146 tmpreg
= gen_reg_rtx (Pmode
);
9147 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9148 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
9149 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
9151 /* Add TOC symbol value to TLS pointer. */
9152 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
9157 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9158 this (thread-local) address. */
9161 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
9166 return rs6000_legitimize_tls_address_aix (addr
, model
);
9168 dest
= gen_reg_rtx (Pmode
);
9169 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
9175 tlsreg
= gen_rtx_REG (Pmode
, 13);
9176 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
9180 tlsreg
= gen_rtx_REG (Pmode
, 2);
9181 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
9185 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
9189 tmp
= gen_reg_rtx (Pmode
);
9192 tlsreg
= gen_rtx_REG (Pmode
, 13);
9193 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
9197 tlsreg
= gen_rtx_REG (Pmode
, 2);
9198 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
9202 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
9204 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
9209 rtx r3
, got
, tga
, tmp1
, tmp2
, call_insn
;
9211 /* We currently use relocations like @got@tlsgd for tls, which
9212 means the linker will handle allocation of tls entries, placing
9213 them in the .got section. So use a pointer to the .got section,
9214 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9215 or to secondary GOT sections used by 32-bit -fPIC. */
9217 got
= gen_rtx_REG (Pmode
, 2);
9221 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
9224 rtx gsym
= rs6000_got_sym ();
9225 got
= gen_reg_rtx (Pmode
);
9227 rs6000_emit_move (got
, gsym
, Pmode
);
9232 tmp1
= gen_reg_rtx (Pmode
);
9233 tmp2
= gen_reg_rtx (Pmode
);
9234 mem
= gen_const_mem (Pmode
, tmp1
);
9235 lab
= gen_label_rtx ();
9236 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
9237 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
9238 if (TARGET_LINK_STACK
)
9239 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
9240 emit_move_insn (tmp2
, mem
);
9241 rtx_insn
*last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
9242 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
9247 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
9249 tga
= rs6000_tls_get_addr ();
9250 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
9253 r3
= gen_rtx_REG (Pmode
, 3);
9254 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9257 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
9259 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
9261 else if (DEFAULT_ABI
== ABI_V4
)
9262 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
9265 call_insn
= last_call_insn ();
9266 PATTERN (call_insn
) = insn
;
9267 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9268 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9269 pic_offset_table_rtx
);
9271 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
9273 tga
= rs6000_tls_get_addr ();
9274 tmp1
= gen_reg_rtx (Pmode
);
9275 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
9278 r3
= gen_rtx_REG (Pmode
, 3);
9279 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9282 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
9284 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
9286 else if (DEFAULT_ABI
== ABI_V4
)
9287 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
9290 call_insn
= last_call_insn ();
9291 PATTERN (call_insn
) = insn
;
9292 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9293 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9294 pic_offset_table_rtx
);
9296 if (rs6000_tls_size
== 16)
9299 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
9301 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
9303 else if (rs6000_tls_size
== 32)
9305 tmp2
= gen_reg_rtx (Pmode
);
9307 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
9309 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
9312 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
9314 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
9318 tmp2
= gen_reg_rtx (Pmode
);
9320 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
9322 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
9324 insn
= gen_rtx_SET (dest
, gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
9330 /* IE, or 64-bit offset LE. */
9331 tmp2
= gen_reg_rtx (Pmode
);
9333 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
9335 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
9338 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
9340 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
/* Only create the global variable for the stack protect guard if we are using
   the global flavor of that guard.  */

static tree
rs6000_init_stack_protect_guard (void)
{
  if (rs6000_stack_protector_guard == SSP_GLOBAL)
    return default_stack_protect_guard ();

  return NULL_TREE;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
    return true;

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && tls_referenced_p (x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym, machine_mode mode)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && SYMBOL_REF_LOCAL_P (sym)
	      && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
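/* Illustrative note (not from the original comments): this accepts either a
   SYMBOL_REF for a constant already forced into the TOC, or, for
   -mcmodel=medium, a symbol that binds locally and whose access size is no
   larger than the assumed 8-byte TOC pointer alignment, so the access cannot
   straddle a 32k boundary relative to the TOC base.  */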
9394 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9395 replace the input X, or the original X if no replacement is called for.
9396 The output parameter *WIN is 1 if the calling macro should goto WIN,
9399 For RS/6000, we wish to handle large displacements off a base
9400 register by splitting the addend across an addiu/addis and the mem insn.
9401 This cuts number of extra insns needed from 3 to 1.
9403 On Darwin, we use this to generate code for floating point constants.
9404 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9405 The Darwin code is inside #if TARGET_MACHO because only then are the
9406 machopic_* functions defined. */
9408 rs6000_legitimize_reload_address (rtx x
, machine_mode mode
,
9409 int opnum
, int type
,
9410 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
9412 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9413 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9415 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9416 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9419 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
9420 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)
9421 || (mode
== SFmode
&& recog_data
.operand_mode
[0] == V4SFmode
9422 && TARGET_P9_VECTOR
)
9423 || (mode
== SImode
&& recog_data
.operand_mode
[0] == V4SImode
9424 && TARGET_P9_VECTOR
)))
9425 reg_offset_p
= false;
9427 /* We must recognize output that we have already generated ourselves. */
9428 if (GET_CODE (x
) == PLUS
9429 && GET_CODE (XEXP (x
, 0)) == PLUS
9430 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9431 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9432 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9434 if (TARGET_DEBUG_ADDR
)
9436 fprintf (stderr
, "\nlegitimize_reload_address push_reload #1:\n");
9439 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9440 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9441 opnum
, (enum reload_type
) type
);
9446 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9447 if (GET_CODE (x
) == LO_SUM
9448 && GET_CODE (XEXP (x
, 0)) == HIGH
)
9450 if (TARGET_DEBUG_ADDR
)
9452 fprintf (stderr
, "\nlegitimize_reload_address push_reload #2:\n");
9455 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9456 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9457 opnum
, (enum reload_type
) type
);
9463 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
9464 && GET_CODE (x
) == LO_SUM
9465 && GET_CODE (XEXP (x
, 0)) == PLUS
9466 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
9467 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
9468 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
9469 && machopic_operand_p (XEXP (x
, 1)))
9471 /* Result of previous invocation of this function on Darwin
9472 floating point constant. */
9473 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9474 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9475 opnum
, (enum reload_type
) type
);
9481 if (TARGET_CMODEL
!= CMODEL_SMALL
9484 && small_toc_ref (x
, VOIDmode
))
9486 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
9487 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
9488 if (TARGET_DEBUG_ADDR
)
9490 fprintf (stderr
, "\nlegitimize_reload_address push_reload #3:\n");
9493 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9494 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9495 opnum
, (enum reload_type
) type
);
9500 if (GET_CODE (x
) == PLUS
9501 && REG_P (XEXP (x
, 0))
9502 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
9503 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
9504 && CONST_INT_P (XEXP (x
, 1))
9506 && !PAIRED_VECTOR_MODE (mode
)
9507 && (quad_offset_p
|| !VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
9509 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
9510 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
9512 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9514 /* Check for 32-bit overflow or quad addresses with one of the
9515 four least significant bits set. */
9516 if (high
+ low
!= val
9517 || (quad_offset_p
&& (low
& 0xf)))
9523 /* Reload the high part into a base reg; leave the low part
9524 in the mem directly. */
9526 x
= gen_rtx_PLUS (GET_MODE (x
),
9527 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
9531 if (TARGET_DEBUG_ADDR
)
9533 fprintf (stderr
, "\nlegitimize_reload_address push_reload #4:\n");
9536 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9537 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9538 opnum
, (enum reload_type
) type
);
9543 if (GET_CODE (x
) == SYMBOL_REF
9546 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
9547 && !PAIRED_VECTOR_MODE (mode
)
9549 && DEFAULT_ABI
== ABI_DARWIN
9550 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
9551 && machopic_symbol_defined_p (x
)
9553 && DEFAULT_ABI
== ABI_V4
9556 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9557 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9559 ??? Assume floating point reg based on mode? This assumption is
9560 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9561 where reload ends up doing a DFmode load of a constant from
9562 mem using two gprs. Unfortunately, at this point reload
9563 hasn't yet selected regs so poking around in reload data
9564 won't help and even if we could figure out the regs reliably,
9565 we'd still want to allow this transformation when the mem is
9566 naturally aligned. Since we say the address is good here, we
9567 can't disable offsets from LO_SUMs in mem_operand_gpr.
9568 FIXME: Allow offset from lo_sum for other modes too, when
9569 mem is sufficiently aligned.
9571 Also disallow this if the type can go in VMX/Altivec registers, since
9572 those registers do not have d-form (reg+offset) address modes. */
9573 && !reg_addr
[mode
].scalar_in_vmx_p
9578 && (mode
!= TImode
|| !TARGET_VSX
)
9580 && (mode
!= DImode
|| TARGET_POWERPC64
)
9581 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
9582 || (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)))
9587 rtx offset
= machopic_gen_offset (x
);
9588 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9589 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
9590 gen_rtx_HIGH (Pmode
, offset
)), offset
);
9594 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9595 gen_rtx_HIGH (Pmode
, x
), x
);
9597 if (TARGET_DEBUG_ADDR
)
9599 fprintf (stderr
, "\nlegitimize_reload_address push_reload #5:\n");
9602 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9603 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9604 opnum
, (enum reload_type
) type
);
9609 /* Reload an offset address wrapped by an AND that represents the
9610 masking of the lower bits. Strip the outer AND and let reload
9611 convert the offset address into an indirect address. For VSX,
9612 force reload to create the address with an AND in a separate
9613 register, because we can't guarantee an altivec register will
9615 if (VECTOR_MEM_ALTIVEC_P (mode
)
9616 && GET_CODE (x
) == AND
9617 && GET_CODE (XEXP (x
, 0)) == PLUS
9618 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9619 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9620 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9621 && INTVAL (XEXP (x
, 1)) == -16)
9631 && GET_CODE (x
) == SYMBOL_REF
9632 && use_toc_relative_ref (x
, mode
))
9634 x
= create_TOC_reference (x
, NULL_RTX
);
9635 if (TARGET_CMODEL
!= CMODEL_SMALL
)
9637 if (TARGET_DEBUG_ADDR
)
9639 fprintf (stderr
, "\nlegitimize_reload_address push_reload #6:\n");
9642 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9643 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9644 opnum
, (enum reload_type
) type
);
9653 /* Debug version of rs6000_legitimize_reload_address. */
9655 rs6000_debug_legitimize_reload_address (rtx x
, machine_mode mode
,
9656 int opnum
, int type
,
9657 int ind_levels
, int *win
)
9659 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
9662 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9663 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9664 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
9668 fprintf (stderr
, "Same address returned\n");
9670 fprintf (stderr
, "NULL returned\n");
9673 fprintf (stderr
, "New address:\n");
9680 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9681 that is a valid memory address for an instruction.
9682 The MODE argument is the machine mode for the MEM expression
9683 that wants to use this address.
9685 On the RS/6000, there are four valid address: a SYMBOL_REF that
9686 refers to a constant pool entry of an address (or the sum of it
9687 plus a constant), a short (16-bit signed) constant plus a register,
9688 the sum of two registers, or a register indirect, possibly with an
9689 auto-increment. For DFmode, DDmode and DImode with a constant plus
9690 register, we must ensure that both words are addressable or PowerPC64
9691 with offset word aligned.
9693 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9694 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9695 because adjacent memory cells are accessed by adding word-sized offsets
9696 during assembly output. */
9698 rs6000_legitimate_address_p (machine_mode mode
, rtx x
, bool reg_ok_strict
)
9700 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9701 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9703 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9704 if (VECTOR_MEM_ALTIVEC_P (mode
)
9705 && GET_CODE (x
) == AND
9706 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9707 && INTVAL (XEXP (x
, 1)) == -16)
9710 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
9712 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
9715 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
9716 && mode_supports_pre_incdec_p (mode
)
9717 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
9719 /* Handle restricted vector d-form offsets in ISA 3.0. */
9722 if (quad_address_p (x
, mode
, reg_ok_strict
))
9725 else if (virtual_stack_registers_memory_p (x
))
9728 else if (reg_offset_p
)
9730 if (legitimate_small_data_p (mode
, x
))
9732 if (legitimate_constant_pool_address_p (x
, mode
,
9733 reg_ok_strict
|| lra_in_progress
))
9735 if (reg_addr
[mode
].fused_toc
&& GET_CODE (x
) == UNSPEC
9736 && XINT (x
, 1) == UNSPEC_FUSION_ADDIS
)
9740 /* For TImode, if we have TImode in VSX registers, only allow register
9741 indirect addresses. This will allow the values to go in either GPRs
9742 or VSX registers without reloading. The vector types would tend to
9743 go into VSX registers, so we allow REG+REG, while TImode seems
9744 somewhat split, in that some uses are GPR based, and some VSX based. */
9745 /* FIXME: We could loosen this by changing the following to
9746 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9747 but currently we cannot allow REG+REG addressing for TImode. See
9748 PR72827 for complete details on how this ends up hoodwinking DSE. */
9749 if (mode
== TImode
&& TARGET_VSX
)
9751 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9754 && GET_CODE (x
) == PLUS
9755 && GET_CODE (XEXP (x
, 0)) == REG
9756 && (XEXP (x
, 0) == virtual_stack_vars_rtx
9757 || XEXP (x
, 0) == arg_pointer_rtx
)
9758 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9760 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
9762 if (!FLOAT128_2REG_P (mode
)
9763 && ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
9765 || (mode
!= DFmode
&& mode
!= DDmode
))
9766 && (TARGET_POWERPC64
|| mode
!= DImode
)
9767 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
9769 && !avoiding_indexed_address_p (mode
)
9770 && legitimate_indexed_address_p (x
, reg_ok_strict
))
9772 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
9773 && mode_supports_pre_modify_p (mode
)
9774 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
9775 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
9776 reg_ok_strict
, false)
9777 || (!avoiding_indexed_address_p (mode
)
9778 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
9779 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
9781 if (reg_offset_p
&& !quad_offset_p
9782 && legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
9787 /* Debug version of rs6000_legitimate_address_p. */
9789 rs6000_debug_legitimate_address_p (machine_mode mode
, rtx x
,
9792 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
9794 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9795 "strict = %d, reload = %s, code = %s\n",
9796 ret
? "true" : "false",
9797 GET_MODE_NAME (mode
),
9799 (reload_completed
? "after" : "before"),
9800 GET_RTX_NAME (GET_CODE (x
)));
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
9815 /* Go to LABEL if ADDR (a legitimate address expression)
9816 has an effect that depends on the machine mode it is used for.
9818 On the RS/6000 this is true of all integral offsets (since AltiVec
9819 and VSX modes don't allow them) or is a pre-increment or decrement.
9821 ??? Except that due to conceptual problems in offsettable_address_p
9822 we can't really report the problems of integral offsets. So leave
9823 this assuming that the adjustable offset must be valid for the
9824 sub-words of a TFmode operand, which is what we had before. */
9827 rs6000_mode_dependent_address (const_rtx addr
)
9829 switch (GET_CODE (addr
))
9832 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9833 is considered a legitimate address before reload, so there
9834 are no offset restrictions in that case. Note that this
9835 condition is safe in strict mode because any address involving
9836 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9837 been rejected as illegitimate. */
9838 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
9839 && XEXP (addr
, 0) != arg_pointer_rtx
9840 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
9842 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
9843 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
9848 /* Anything in the constant pool is sufficiently aligned that
9849 all bytes have the same high part address. */
9850 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
9852 /* Auto-increment cases are now treated generically in recog.c. */
9854 return TARGET_UPDATE
;
9856 /* AND is only allowed in Altivec loads. */
9867 /* Debug version of rs6000_mode_dependent_address. */
9869 rs6000_debug_mode_dependent_address (const_rtx addr
)
9871 bool ret
= rs6000_mode_dependent_address (addr
);
9873 fprintf (stderr
, "\nrs6000_mode_dependent_address: ret = %s\n",
9874 ret
? "true" : "false");
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
9905 /* More elaborate version of recog's offsettable_memref_p predicate
9906 that works around the ??? note of rs6000_mode_dependent_address.
9907 In particular it accepts
9909 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9911 in 32-bit mode, that the recog predicate rejects. */
9914 rs6000_offsettable_memref_p (rtx op
, machine_mode reg_mode
)
9921 /* First mimic offsettable_memref_p. */
9922 if (offsettable_address_p (true, GET_MODE (op
), XEXP (op
, 0)))
9925 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9926 the latter predicate knows nothing about the mode of the memory
9927 reference and, therefore, assumes that it is the largest supported
9928 mode (TFmode). As a consequence, legitimate offsettable memory
9929 references are rejected. rs6000_legitimate_offset_address_p contains
9930 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9931 at least with a little bit of help here given that we know the
9932 actual registers used. */
9933 worst_case
= ((TARGET_POWERPC64
&& GET_MODE_CLASS (reg_mode
) == MODE_INT
)
9934 || GET_MODE_SIZE (reg_mode
) == 4);
9935 return rs6000_legitimate_offset_address_p (GET_MODE (op
), XEXP (op
, 0),
9939 /* Determine the reassociation width to be used in reassociate_bb.
9940 This takes into account how many parallel operations we
9941 can actually do of a given type, and also the latency.
9945 vect add/sub/mul 2/cycle
9946 fp add/sub/mul 2/cycle
9951 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED
,
9956 case PROCESSOR_POWER8
:
9957 case PROCESSOR_POWER9
:
9958 if (DECIMAL_FLOAT_MODE_P (mode
))
9960 if (VECTOR_MODE_P (mode
))
9962 if (INTEGRAL_MODE_P (mode
))
9963 return opc
== MULT_EXPR
? 4 : 6;
9964 if (FLOAT_MODE_P (mode
))
9973 /* Change register usage conditional on target flags. */
9975 rs6000_conditional_register_usage (void)
9979 if (TARGET_DEBUG_TARGET
)
9980 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
9982 /* Set MQ register fixed (already call_used) so that it will not be
9986 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9988 fixed_regs
[13] = call_used_regs
[13]
9989 = call_really_used_regs
[13] = 1;
9991 /* Conditionally disable FPRs. */
9992 if (TARGET_SOFT_FLOAT
)
9993 for (i
= 32; i
< 64; i
++)
9994 fixed_regs
[i
] = call_used_regs
[i
]
9995 = call_really_used_regs
[i
] = 1;
9997 /* The TOC register is not killed across calls in a way that is
9998 visible to the compiler. */
9999 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
10000 call_really_used_regs
[2] = 0;
10002 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
10003 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10005 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
10006 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10007 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10008 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10010 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
10011 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10012 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10013 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10015 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
10016 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10017 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10019 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
10021 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
10022 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10023 call_really_used_regs
[VRSAVE_REGNO
] = 1;
10026 if (TARGET_ALTIVEC
|| TARGET_VSX
)
10027 global_regs
[VSCR_REGNO
] = 1;
10029 if (TARGET_ALTIVEC_ABI
)
10031 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
10032 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10034 /* AIX reserves VR20:31 in non-extended ABI mode. */
10036 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
10037 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
/* Output insns to set DEST equal to the constant SOURCE as a series of
   lis, ori and shl instructions and return TRUE.  */

bool
rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case E_SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
                              GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
                              gen_rtx_IOR (SImode, copy_rtx (temp),
                                           GEN_INT (c & 0xffff))));
      break;

    case E_DImode:
      if (!TARGET_POWERPC64)
        {
          rtx hi, lo;

          hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
                                      DImode);
          lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
                                      DImode);
          emit_move_insn (hi, GEN_INT (c >> 32));
          c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
          emit_move_insn (lo, GEN_INT (c));
        }
      else
        rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
           || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
      emit_move_insn (dest,
                      gen_rtx_ZERO_EXTEND (DImode,
                                           gen_lowpart (SImode,
                                                        copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
           || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
                                      GEN_INT (16)));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud3)));
      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
                      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
                                      GEN_INT (32)));
      if (ud2 != 0)
        emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
}
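
/* Illustrative sketch only: the code above views the 64-bit constant C
   as four 16-bit chunks ud1..ud4, least-significant first, and emits
   only the lis/ori/shift steps needed for the nonzero chunks.  For
   example, 0x123456789abcdef0 splits into ud1=0xdef0, ud2=0x9abc,
   ud3=0x5678, ud4=0x1234.  A plain-C model of the split:  */

static inline void
example_split_into_ud_chunks (unsigned long long c, unsigned short ud[4])
{
  for (int i = 0; i < 4; i++)
    {
      ud[i] = c & 0xffff;   /* ud[0] corresponds to ud1 above */
      c >>= 16;
    }
}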
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
                                               GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
                               copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
                                               GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
                               copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
static rtvec
rs6000_const_vec (machine_mode mode)
{
  int i;
  int subparts;
  rtvec v;

  switch (mode)
    {
    case E_V1TImode:
      subparts = 1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      subparts = 2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      subparts = 4;
      break;
    case E_V8HImode:
      subparts = 8;
      break;
    case E_V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
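
/* Illustrative sketch only: the selector built above swaps the two
   halves of the vector.  For V4SImode (subparts == 4) it is {2,3,0,1};
   for V16QImode it is {8..15,0..7}.  A plain-C model of the index
   computation:  */

static inline void
example_swap_halves_selector (int subparts, int *sel)
{
  for (int i = 0; i < subparts / 2; i++)
    sel[i] = i + subparts / 2;          /* first half selects from second */
  for (int i = subparts / 2; i < subparts; i++)
    sel[i] = i - subparts / 2;          /* second half selects from first */
}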
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */
static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     element.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
                                                  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */
static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
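
/* Illustrative sketch only: swapping the two halves of a vector is an
   involution, so the element permutation performed by lxvd2x composed
   with the xxpermdi emitted above restores the original element order.
   A plain-C model (N even, N <= 16) demonstrating that two half-swaps
   are the identity:  */

static inline void
example_two_half_swaps_are_identity (int n, const int *in, int *out)
{
  int tmp[16];
  for (int i = 0; i < n; i++)
    tmp[i] = in[(i + n / 2) % n];       /* first swap (the load itself) */
  for (int i = 0; i < n; i++)
    out[i] = tmp[(i + n / 2) % n];      /* second swap (the permute) */
  /* out[] now equals in[] element for element.  */
}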
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */
static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
              && VECTOR_MEM_VSX_P (mode)
              && !TARGET_P9_VECTOR
              && !gpr_or_gpr_p (dest, source)
              && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
        source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

        (SUBREG:SF (REG:SI ...))
        (SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

        (SUBREG:SF (REG:SI)) and
        (SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to convert
   the bits before we can use a direct move or operate on the bits in the
   vector register as an integer type.

   Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
        {
          emit_insn (gen_movsi_from_sf (dest, inner_source));
          return true;
        }

      if (mode == SFmode && inner_mode == SImode)
        {
          emit_insn (gen_movsf_from_si (dest, inner_source));
          return true;
        }
    }

  return false;
}
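
/* Illustrative sketch only: at the source level, the SUBREG pair the
   helper above rewrites corresponds to reinterpreting the bits of a
   float as a 32-bit integer (or vice versa), which plain C expresses
   with memcpy.  The dedicated insns exist because an SFmode value
   parked in a VSX register is physically stored in DFmode format, so
   the raw bits must be converted before they can be used as an
   integer.  */

static inline unsigned int
example_sf_bits_as_si (float f)
{
  unsigned int u;
  __builtin_memcpy (&u, &f, sizeof (u));    /* (subreg:SI (reg:SF)) */
  return u;
}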
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
               "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
               "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
               GET_MODE_NAME (mode),
               lra_in_progress,
               reload_completed,
               can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (CONST_WIDE_INT_P (operands[1])
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* This should be fixed with the introduction of CONST_WIDE_INT.  */
      gcc_unreachable ();
    }
  /* See if we need to special case SImode/SFmode SUBREG moves.  */
  if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
      && rs6000_emit_move_si_sf_subreg (dest, source, mode))
    return;

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
          || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
      && ! (rs6000_slow_unaligned_access (SImode,
                                          (MEM_ALIGN (operands[0]) > 32
                                           ? 32 : MEM_ALIGN (operands[0])))
            || rs6000_slow_unaligned_access (SImode,
                                             (MEM_ALIGN (operands[1]) > 32
                                              ? 32 : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
                      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
                      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }
  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
        {
          addend = XEXP (XEXP (tmp, 0), 1);
          tmp = XEXP (XEXP (tmp, 0), 0);
        }

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
        {
          tmp = gen_rtx_PLUS (mode, tmp, addend);
          tmp = force_operand (tmp, operands[0]);
        }
      operands[1] = tmp;
    }
  /* 128-bit constant floating-point values on Darwin should really be loaded
     as two parts.  However, this premature splitting is a problem when DFmode
     values can go into Altivec registers.  */
  if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
      && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
                        simplify_gen_subreg (DFmode, operands[1], mode, 0),
                        DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
                                             GET_MODE_SIZE (DFmode)),
                        simplify_gen_subreg (DFmode, operands[1], mode,
                                             GET_MODE_SIZE (DFmode)),
                        DFmode);
      return;
    }
  /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
     p1:SD) if p1 is not of floating point class and p0 is spilled as
     we can have no analogous movsd_store for this.  */
  if (lra_in_progress && mode == DDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
      && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[1]));

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
        }
      if (regno >= 0 && ! FP_REGNO_P (regno))
        {
          mode = SDmode;
          operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
          operands[1] = SUBREG_REG (operands[1]);
        }
    }
  if (lra_in_progress
      && mode == SDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && (REG_P (operands[1])
          || (GET_CODE (operands[1]) == SUBREG
              && REG_P (SUBREG_REG (operands[1])))))
    {
      int regno = REGNO (GET_CODE (operands[1]) == SUBREG
                         ? SUBREG_REG (operands[1]) : operands[1]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          gcc_assert (cl != NO_REGS);
          regno = ira_class_hard_regs[cl][0];
        }
      if (FP_REGNO_P (regno))
        {
          if (GET_MODE (operands[0]) != DDmode)
            operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
          emit_insn (gen_movsd_store (operands[0], operands[1]));
        }
      else if (INT_REGNO_P (regno))
        emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
        gcc_unreachable ();
      return;
    }
  /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
     p:DD)) if p0 is not of floating point class and p1 is spilled as
     we can have no analogous movsd_load for this.  */
  if (lra_in_progress && mode == DDmode
      && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
      && GET_MODE (SUBREG_REG (operands[0])) == SDmode
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[0]));

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
        }
      if (regno >= 0 && ! FP_REGNO_P (regno))
        {
          mode = SDmode;
          operands[0] = SUBREG_REG (operands[0]);
          operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
        }
    }
  if (lra_in_progress
      && mode == SDmode
      && (REG_P (operands[0])
          || (GET_CODE (operands[0]) == SUBREG
              && REG_P (SUBREG_REG (operands[0]))))
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      int regno = REGNO (GET_CODE (operands[0]) == SUBREG
                         ? SUBREG_REG (operands[0]) : operands[0]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          gcc_assert (cl != NO_REGS);
          regno = ira_class_hard_regs[cl][0];
        }
      if (FP_REGNO_P (regno))
        {
          if (GET_MODE (operands[1]) != DDmode)
            operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
          emit_insn (gen_movsd_load (operands[0], operands[1]));
        }
      else if (INT_REGNO_P (regno))
        emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
        gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     GET_MODE_SIZE.  */
  switch (mode)
    {
    case E_HImode:
    case E_QImode:
      if (CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != CONST_INT)
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_TFmode:
    case E_TDmode:
    case E_IFmode:
    case E_KFmode:
      if (FLOAT128_2REG_P (mode))
        rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case E_DFmode:
    case E_DDmode:
    case E_SFmode:
    case E_SDmode:
      if (CONSTANT_P (operands[1])
          && ! easy_fp_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2SFmode:
    case E_V2SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
      if (CONSTANT_P (operands[1])
          && !easy_vector_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_SImode:
    case E_DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
          && mode == Pmode
          && DEFAULT_ABI == ABI_V4
          && (GET_CODE (operands[1]) == SYMBOL_REF
              || GET_CODE (operands[1]) == CONST)
          && small_data_operand (operands[1], mode))
        {
          emit_insn (gen_rtx_SET (operands[0], operands[1]));
          return;
        }

      if (DEFAULT_ABI == ABI_V4
          && mode == Pmode && mode == SImode
          && flag_pic == 1 && got_operand (operands[1], mode))
        {
          emit_insn (gen_movsi_got (operands[0], operands[1]));
          return;
        }

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
          && TARGET_NO_TOC
          && ! flag_pic
          && mode == Pmode
          && CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != HIGH
          && GET_CODE (operands[1]) != CONST_INT)
        {
          rtx target = (!can_create_pseudo_p ()
                        ? operands[0]
                        : gen_reg_rtx (mode));

          /* If this is a function address on -mcall-aixdesc,
             convert it to the address of the descriptor.  */
          if (DEFAULT_ABI == ABI_AIX
              && GET_CODE (operands[1]) == SYMBOL_REF
              && XSTR (operands[1], 0)[0] == '.')
            {
              const char *name = XSTR (operands[1], 0);
              rtx new_ref;
              while (*name == '.')
                name++;
              new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
              CONSTANT_POOL_ADDRESS_P (new_ref)
                = CONSTANT_POOL_ADDRESS_P (operands[1]);
              SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
              SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
              SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
              operands[1] = new_ref;
            }

          if (DEFAULT_ABI == ABI_DARWIN)
            {
#if TARGET_MACHO
              if (MACHO_DYNAMIC_NO_PIC_P)
                {
                  /* Take care of any required data indirection.  */
                  operands[1] = rs6000_machopic_legitimize_pic_address (
                                  operands[1], mode, operands[0]);
                  if (operands[0] != operands[1])
                    emit_insn (gen_rtx_SET (operands[0], operands[1]));
                  return;
                }
#endif
              emit_insn (gen_macho_high (target, operands[1]));
              emit_insn (gen_macho_low (operands[0], target, operands[1]));
              return;
            }

          emit_insn (gen_elf_high (target, operands[1]));
          emit_insn (gen_elf_low (operands[0], target, operands[1]));
          return;
        }

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
         and we have put it in the TOC, we just need to make a TOC-relative
         reference to it.  */
      if (TARGET_TOC
          && GET_CODE (operands[1]) == SYMBOL_REF
          && use_toc_relative_ref (operands[1], mode))
        operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
               && CONSTANT_P (operands[1])
               && GET_CODE (operands[1]) != HIGH
               && ((GET_CODE (operands[1]) != CONST_INT
                    && ! easy_fp_constant (operands[1], mode))
                   || (GET_CODE (operands[1]) == CONST_INT
                       && (num_insns_constant (operands[1], mode)
                           > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
                   || (GET_CODE (operands[0]) == REG
                       && FP_REGNO_P (REGNO (operands[0]))))
               && !toc_relative_expr_p (operands[1], false, NULL, NULL)
               && (TARGET_CMODEL == CMODEL_SMALL
                   || can_create_pseudo_p ()
                   || (REG_P (operands[0])
                       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
        {

#if TARGET_MACHO
          /* Darwin uses a special PIC legitimizer.  */
          if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
            {
              operands[1] =
                rs6000_machopic_legitimize_pic_address (operands[1], mode,
                                                        operands[0]);
              if (operands[0] != operands[1])
                emit_insn (gen_rtx_SET (operands[0], operands[1]));
              break;
            }
#endif

          /* If we are to limit the number of things we put in the TOC and
             this is a symbol plus a constant we can add in one insn,
             just put the symbol in the TOC and add the constant.  */
          if (GET_CODE (operands[1]) == CONST
              && TARGET_NO_SUM_IN_TOC
              && GET_CODE (XEXP (operands[1], 0)) == PLUS
              && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
              && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
                  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
              && ! side_effects_p (operands[0]))
            {
              rtx sym =
                force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
              rtx other = XEXP (XEXP (operands[1], 0), 1);

              sym = force_reg (mode, sym);
              emit_insn (gen_add3_insn (operands[0], sym, other));
              return;
            }

          operands[1] = force_const_mem (mode, operands[1]);

          if (TARGET_TOC
              && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
              && use_toc_relative_ref (XEXP (operands[1], 0), mode))
            {
              rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
                                                 operands[0]);
              operands[1] = gen_const_mem (mode, tocref);
              set_mem_alias_set (operands[1], get_TOC_alias_set ());
            }
        }
      break;

    case E_TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
        rs6000_eliminate_indexed_memrefs (operands);
      break;

    case E_PTImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = validize_mem (operands[1]);

  emit_insn (gen_rtx_SET (operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)              \
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)        \
   && (CUM)->fregno <= FP_ARG_MAX_REG           \
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)   \
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)            \
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG      \
   && TARGET_ALTIVEC_ABI                        \
   && (NAMED))
/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   or vector type.  If a non-floating point or vector type is found, or
   if a floating point or vector type that doesn't match a non-VOIDmode
   *MODEP is found, then return -1, otherwise return the count in the
   sub-tree.  */

static int
rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
{
  machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (!SCALAR_FLOAT_MODE_P (mode))
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (!SCALAR_FLOAT_MODE_P (mode))
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 2;

      break;

    case VECTOR_TYPE:
      if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
        return -1;

      /* Use V4SImode as representative of all 128-bit vector types.  */
      size = int_size_in_bytes (type);
      switch (size)
        {
        case 16:
          mode = V4SImode;
          break;
        default:
          return -1;
        }

      if (*modep == VOIDmode)
        *modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
         equivalent for the purposes of being homogeneous aggregates
         if they are the same size.  */
      if (*modep == mode)
        return 1;

      break;

    case ARRAY_TYPE:
      {
        int count;
        tree index = TYPE_DOMAIN (type);

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
        if (count == -1
            || !index
            || !TYPE_MAX_VALUE (index)
            || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
            || !TYPE_MIN_VALUE (index)
            || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
            || count < 0)
          return -1;

        count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
                  - tree_to_uhwi (TYPE_MIN_VALUE (index)));

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    case RECORD_TYPE:
      {
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count += sub_count;
          }

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        /* These aren't very interesting except in a degenerate case.  */
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count = count > sub_count ? count : sub_count;
          }

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
                                       machine_mode *elt_mode,
                                       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
        {
          int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
                        (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

          /* The ELFv2 ABI allows homogeneous aggregates to occupy
             up to AGGR_ARG_NUM_REG registers.  */
          if (field_count * n_regs <= AGGR_ARG_NUM_REG)
            {
              if (elt_mode)
                *elt_mode = field_mode;
              if (n_elts)
                *n_elts = field_count;
              return true;
            }
        }
    }

  if (elt_mode)
    *elt_mode = mode;
  if (n_elts)
    *n_elts = 1;
  return false;
}
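
/* Illustrative sketch only, under the ELFv2 rules described above: a
   struct such as

     struct hfa { double a, b, c; };

   is discovered as a homogeneous aggregate with element mode DFmode and
   three elements.  The register-count check then works as modeled here;
   the limit of 8 stands in for AGGR_ARG_NUM_REG.  */

static inline bool
example_hfa_fits_in_regs (int field_count, int field_size_bytes)
{
  int n_regs = (field_size_bytes + 7) >> 3;     /* FPRs per element */
  return field_count * n_regs <= 8;             /* AGGR_ARG_NUM_REG */
}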
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
         as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
        return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
                                             NULL, NULL))
    return false;

  /* The ELFv2 ABI returns aggregates up to 16B in registers */
  if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
    return false;

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
          || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector returned by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_return_big_vectors = true;
        }
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return true;

  return false;
}
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
          && BYTES_BIG_ENDIAN
          && AGGREGATE_TYPE_P (valtype)
          && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
              == PAD_UPWARD));
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
        return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
        return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED, int incoming,
                      int libcall, int n_named_args,
                      tree fndecl ATTRIBUTE_UNUSED,
                      machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
                      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);
  cum->libcall = libcall;

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
          && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
          && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
        {
          tree ret_type = TREE_TYPE (fntype);
          fprintf (stderr, " ret code = %s,",
                   get_tree_code_name (TREE_CODE (ret_type)));
        }

      if (cum->call_cookie & CALL_LONG)
        fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
               cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
        {
          tree return_type;

          if (fntype)
            {
              return_type = TREE_TYPE (fntype);
              return_mode = TYPE_MODE (return_type);
            }
          else
            return_type = lang_hooks.types.type_for_mode (return_mode, 0);

          if (return_type != NULL)
            {
              if (TREE_CODE (return_type) == RECORD_TYPE
                  && TYPE_TRANSPARENT_AGGR (return_type))
                {
                  return_type = TREE_TYPE (first_field (return_type));
                  return_mode = TYPE_MODE (return_type);
                }
              if (AGGREGATE_TYPE_P (return_type)
                  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
                      <= 8))
                rs6000_returns_struct = true;
            }
          if (SCALAR_FLOAT_MODE_P (return_mode))
            {
              rs6000_passes_float = true;
              if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
                  && (FLOAT128_IBM_P (return_mode)
                      || FLOAT128_IEEE_P (return_mode)
                      || (return_type != NULL
                          && (TYPE_MAIN_VARIANT (return_type)
                              == long_double_type_node))))
                rs6000_passes_long_double = true;
            }
          if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
              || PAIRED_VECTOR_MODE (return_mode))
            rs6000_passes_vector = true;
        }
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
             " altivec instructions are disabled, use %qs"
             " to enable them", "-maltivec");
    }
}
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}
/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}
/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                              machine_mode mode,
                              int *punsignedp ATTRIBUTE_UNUSED,
                              const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
static inline bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
}
/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
/* Implement TARGET_FUNCTION_ARG_PADDING.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

static pad_direction
rs6000_function_arg_padding (machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
         if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
         i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
         passed padded downward, except that -mstrict-align further
         muddied the water in that multi-component structures of 2 and 4
         bytes in size were passed padded upward.

         The following arranges for best compatibility with previous
         versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT size = 0;

          if (mode == BLKmode)
            {
              if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
                size = int_size_in_bytes (type);
            }
          else
            size = GET_MODE_SIZE (mode);

          if (size == 1 || size == 2 || size == 4)
            return PAD_DOWNWARD;
        }
      return PAD_UPWARD;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
        return PAD_UPWARD;
    }

  /* Fall back to the default.  */
  return default_function_arg_padding (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (machine_mode mode, const_tree type)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
          || (TARGET_HARD_FLOAT
              && !is_complex_IBM_long_double (mode)
              && FLOAT128_2REG_P (mode))))
    return 64;
  else if (FLOAT128_VECTOR_P (mode))
    return 128;
  else if (PAIRED_VECTOR_MODE (mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 8
               && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 16))
    return 128;

  /* Aggregate types that need > 8 byte alignment are quadword-aligned
     in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
     -mcompat-align-parm is used.  */
  if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
       || DEFAULT_ABI == ABI_ELFv2)
      && type && TYPE_ALIGN (type) > 64)
    {
      /* "Aggregate" means any AGGREGATE_TYPE except for single-element
         or homogeneous float/vector aggregates here.  We already handled
         vector aggregates above, but still need to check for float here. */
      bool aggregate_p = (AGGREGATE_TYPE_P (type)
                          && !SCALAR_FLOAT_MODE_P (elt_mode));

      /* We used to check for BLKmode instead of the above aggregate type
         check.  Warn when this results in any difference to the ABI.  */
      if (aggregate_p != (mode == BLKmode))
        {
          static bool warned;
          if (!warned && warn_psabi)
            {
              warned = true;
              inform (input_location,
                      "the ABI of passing aggregates with %d-byte alignment"
                      " has changed in GCC 5",
                      (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
            }
        }

      if (aggregate_p)
        return 128;
    }

  /* Similar for the Darwin64 ABI.  Note that for historical reasons we
     implement the "aggregate type" check as a BLKmode check here; this
     means certain aggregate types are in fact not aligned.  */
  if (TARGET_MACHO && rs6000_darwin64_abi
      && mode == BLKmode
      && type && TYPE_ALIGN (type) > 64)
    return 128;

  return PARM_BOUNDARY;
}
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
          : DEFAULT_ABI == ABI_ELFv2 ? 4
          : 6);
}
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
                   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
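
/* Illustrative sketch only: the expression above rounds the absolute
   word position (parameter-area offset plus NWORDS) up to the argument's
   alignment and then removes the offset again.  ALIGN is a mask, i.e.
   one less than the alignment in words, so "-pos & align" is exactly
   the padding needed to reach the next aligned word.  */

static inline unsigned int
example_parm_start (unsigned int parm_offset, unsigned int nwords,
                    unsigned int align_mask)
{
  return nwords + (-(parm_offset + nwords) & align_mask);
}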
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
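
/* Illustrative sketch only: the shifts above round a byte size up to
   whole argument words, e.g. a 13-byte struct needs (13 + 7) >> 3 = 2
   doublewords under a 64-bit ABI and (13 + 3) >> 2 = 4 words under a
   32-bit ABI.  */

static inline unsigned long
example_round_bytes_to_words (unsigned long size_bytes, int word_bytes)
{
  return (size_bytes + word_bytes - 1) / word_bytes;
}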
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
                                          HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
          || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists ())
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
        }
    }

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
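
/* Illustrative sketch only: the flush above counts how many whole GPR
   words a run of integer fields covers by widening the bit range
   [intoffset, bitpos) outward to word boundaries.  With 64-bit words,
   fields spanning bits 48..80 widen to bits 0..128 and so occupy two
   registers, as this plain-C model shows:  */

static inline int
example_int_words_covered (int startbit, int endbit, int word_bits)
{
  int lo = startbit - startbit % word_bits;     /* ROUND_DOWN */
  int hi = endbit + word_bits - 1;
  hi -= hi % word_bits;                         /* ROUND_UP */
  return (hi - lo) / word_bits;
}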
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
                                            const_tree type,
                                            HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && tree_fits_uhwi_p (bit_position (f)))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
        else if (USE_FP_FOR_ARG_P (cum, mode))
          {
            unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->fregno += n_fpregs;
            /* Single-precision floats present a special problem for
               us, because they are smaller than an 8-byte GPR, and so
               the structure-packing rules combined with the standard
               varargs behavior mean that we want to pack float/float
               and float/int combinations into a single register's
               space.  This is complicated by the arg advance flushing,
               which works on arbitrarily large groups of int-type
               fields.  */
            if (mode == SFmode)
              {
                if (cum->floats_in_gpr == 1)
                  {
                    /* Two floats in a word; count the word and reset
                       the float count.  */
                    cum->words++;
                    cum->floats_in_gpr = 0;
                  }
                else if (bitpos % 64 == 0)
                  {
                    /* A float at the beginning of an 8-byte word;
                       count it and put off adjusting cum->words until
                       we see if an arg advance flush is going to do it
                       for us.  */
                    cum->floats_in_gpr++;
                  }
                else
                  {
                    /* The float is at the end of a word, preceded
                       by integer fields, so the arg advance flush
                       just above has already set cum->words and
                       everything is taken care of.  */
                  }
              }
            else
              cum->words += n_fpregs;
          }
        else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
          {
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->vregno++;
            cum->words += 2;
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
         && ((mode == BLKmode
              && TREE_CODE (type) == RECORD_TYPE
              && int_size_in_bytes (type) > 0)
             || (type && TREE_CODE (type) == RECORD_TYPE
                 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
                               const_tree type, bool named, int depth)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          rs6000_passes_float = true;
          if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
              && (FLOAT128_IBM_P (mode)
                  || FLOAT128_IEEE_P (mode)
                  || (type != NULL
                      && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
            rs6000_passes_long_double = true;
        }
      if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
          || (PAIRED_VECTOR_MODE (mode)
              && !cum->stdarg
              && cum->sysv_gregno <= GP_ARG_MAX_REG))
        rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
          || (type && TREE_CODE (type) == VECTOR_TYPE
              && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
        {
          cum->vregno += n_elts;

          if (!TARGET_ALTIVEC)
            error ("cannot pass argument in vector register because"
                   " altivec instructions are disabled, use %qs"
                   " to enable them", "-maltivec");

          /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
             even if it is going to be passed in a vector register.
             Darwin does the same for variable-argument functions.  */
          if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
               && TARGET_64BIT)
              || (cum->stdarg && DEFAULT_ABI != ABI_V4))
            stack = true;
        }
      else
        stack = true;

      if (stack)
        {
          int align;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          cum->words += align + rs6000_arg_size (mode, type);

          if (TARGET_DEBUG_ARG)
            {
              fprintf (stderr, "function_adv: words = %2d, align=%d, ",
                       cum->words, align);
              fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
                       cum->nargs_prototype, cum->prototype,
                       GET_MODE_NAME (mode));
            }
        }
    }
  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
         treated as if consisting entirely of ints.
         Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
          && (cum->words % 2) != 0)
        cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
        cum->words += (size + 7) / 8;
      else
        {
          /* It is tempting to say int register count just goes up by
             sizeof(type)/8, but this is wrong in a case such as
             { int; double; int; } [powerpc alignment].  We have to
             grovel through the fields for these too.  */
          cum->intoffset = 0;
          cum->floats_in_gpr = 0;
          rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
          rs6000_darwin64_record_arg_advance_flush (cum,
                                                    size * BITS_PER_UNIT, 1);
        }
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
                   cum->words, TYPE_ALIGN (type), size);
          fprintf (stderr,
                   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
                   cum->nargs_prototype, cum->prototype,
                   GET_MODE_NAME (mode));
        }
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
          else
            {
              cum->fregno = FP_ARG_V4_MAX_REG + 1;
              if (mode == DFmode || FLOAT128_IBM_P (mode)
                  || mode == DDmode || mode == TDmode)
                cum->words += cum->words & 1;
              cum->words += rs6000_arg_size (mode, type);
            }
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            {
              /* Long long is aligned on the stack.  So are other 2 word
                 items such as complex int due to a historical mistake.  */
              if (n_words == 2)
                cum->words += cum->words & 1;
              cum->words += n_words;
            }

          /* Note: continuing to accumulate gregno past when we've started
             spilling to the stack indicates the fact that we've started
             spilling to the stack to expand_builtin_saveregs.  */
          cum->sysv_gregno = gregno + n_words;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
                   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
          fprintf (stderr, "mode = %4s, named = %d\n",
                   GET_MODE_NAME (mode), named);
        }
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
        {
          /* _Decimal128 must be passed in an even/odd float register pair.
             This assumes that the register number is odd when fregno is
             odd.  */
          if (elt_mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;
          cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
                   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
          fprintf (stderr, "named = %d, align = %d, depth = %d\n",
                   named, align_words - start_words, depth);
        }
    }
}

static void
rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
                             const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
                                 0);
}
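
/* Illustrative sketch only: in the ABI_V4 code above, a two-word
   argument must start in an odd-numbered GPR so that it lands in one
   of the pairs (r3,r4), (r5,r6), (r7,r8) or (r9,r10).  The expression
   (1 - gregno) & 1 adds one exactly when gregno is even:  */

static inline int
example_align_gregno_to_odd (int gregno)
{
  return gregno + ((1 - gregno) & 1);   /* 3->3, 4->5, 5->5, 6->7, ...  */
}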
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
                                  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists (&mode))
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
          mode = word_mode;
        }
    }
  else
    mode = word_mode;

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
        gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
                                    HOST_WIDE_INT startbitpos, rtx rvec[],
                                    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && tree_fits_uhwi_p (bit_position (f)))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
        else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
          {
            unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
            switch (mode)
              {
              case E_SCmode: mode = SFmode; break;
              case E_DCmode: mode = DFmode; break;
              case E_TCmode: mode = TFmode; break;
              default: break;
              }
#endif
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
              {
                gcc_assert (cum->fregno == FP_ARG_MAX_REG
                            && (mode == TFmode || mode == TDmode));
                /* Long double or _Decimal128 split over regs and memory.  */
                mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
                cum->use_stack = 1;
              }
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->fregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
            if (FLOAT128_2REG_P (mode))
              cum->fregno++;
          }
        else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
          {
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->vregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
                            bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
                                      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
        return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
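
/* Continuing the hypothetical struct { double d; int i; } example, the
   PARALLEL built here looks roughly like

     (parallel:BLK [(expr_list (reg:DF fN) (const_int 0))
                    (expr_list (reg:DI rM) (const_int 8))])

   (register numbers invented for illustration): the double travels in an
   FPR and the rest of the struct image in a GPR, each element tagged with
   its byte offset within the in-memory copy of the struct.  */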
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
                           int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
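
/* Example (hypothetical): a DFmode argument arriving at align_words == 7
   has n_units == 2 but only one GPR left, so the result is roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI r10) (const_int 0))])

   where the leading (nil) element is the "also in memory" marker
   discussed in the comment above.  */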
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
                           int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
          || mode == BLKmode
          || (TARGET_32BIT && TARGET_POWERPC64))
        {
          /* If this is partially on the stack, then we only
             include the portion actually in registers here.  */
          machine_mode rmode = TARGET_32BIT ? SImode : DImode;
          int i = 0;

          if (align_words + n_words > GP_ARG_NUM_REG)
            {
              /* Not all of the arg fits in gprs.  Say that it goes in memory
                 too, using a magic NULL_RTX component.  Also see comment in
                 rs6000_mixed_function_arg for why the normal
                 function_arg_partial_nregs scheme doesn't work in this
                 case.  */
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
            }

          do
            {
              rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
              rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }
          while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
        }
      else
        {
          /* The whole arg fits in gprs.  */
          rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
        }
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}
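
/* E.g. (hypothetical, 64-bit): a 16-byte argument at align_words == 7 has
   n_words == 2 and does not fit, so the magic (nil) element is emitted
   followed by a single DImode piece for the last GPR; with
   align_words >= GP_ARG_NUM_REG only the (nil) element appears.  The
   caller then appends the FPR or VR elements on top of these.  */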
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
        return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
        return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
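
/* In other words: a single element that is a register of the right mode
   collapses to a bare (reg ...), a single NULL_RTX element collapses to
   NULL_RTX (entirely in memory), and every other combination is returned
   as a PARALLEL.  */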
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
                     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
          && (cum->call_cookie & CALL_LIBCALL) == 0
          && (cum->stdarg
              || (cum->nargs_prototype < 0
                  && (cum->prototype || TARGET_NO_PROTOTYPE)))
          && TARGET_HARD_FLOAT)
        return GEN_INT (cum->call_cookie
                        | ((cum->fregno == FP_ARG_MIN_REG)
                           ? CALL_V4_SET_FP_ARGS
                           : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
                                             /*retval= */false);
      if (rslt != NULL_RTX)
        return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
         Library support functions for IEEE 128-bit are assumed to not need
         the value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        {
          int align_words = ROUND_UP (cum->words, 2);
          k = rs6000_psave_function_arg (mode, type, align_words, rvec);
        }

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
        {
          r = gen_rtx_REG (elt_mode, cum->vregno + i);
          off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
           && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
               || (type && TREE_CODE (type) == VECTOR_TYPE
                   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
        return NULL_RTX;
      else
        {
          /* Vector parameters to varargs functions under AIX or Darwin
             get passed in memory and possibly also in GPRs.  */
          int align, align_words, n_words;
          machine_mode part_mode;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          align_words = cum->words + align;

          /* Out of registers?  Memory, then.  */
          if (align_words >= GP_ARG_NUM_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          /* The vector value goes in GPRs.  Only the part of the
             value in GPRs is reported here.  */
          part_mode = mode;
          n_words = rs6000_arg_size (mode, type);
          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Fortunately, there are only two possibilities, the value
               is either wholly in GPRs or half in GPRs and half not.  */
            part_mode = DImode;

          return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
        }
    }

  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            return gen_rtx_REG (mode, cum->fregno);
          else
            return NULL_RTX;
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type,
                                              gregno - GP_ARG_MIN_REG);
          return gen_rtx_REG (mode, gregno);
        }
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
         This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
        cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
        {
          rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
          rtx r, off;
          int i, k = 0;
          unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
          int fpr_words;

          /* Do we also need to pass this argument in the parameter
             save area?  */
          if (type && (cum->nargs_prototype <= 0
                       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                           && TARGET_XL_COMPAT
                           && align_words >= GP_ARG_NUM_REG)))
            k = rs6000_psave_function_arg (mode, type, align_words, rvec);

          /* Describe where this argument goes in the fprs.  */
          for (i = 0; i < n_elts
                      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
            {
              /* Check if the argument is split over registers and memory.
                 This can only ever happen for long double or _Decimal128;
                 complex types are handled via split_complex_arg.  */
              machine_mode fmode = elt_mode;
              if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
                {
                  gcc_assert (FLOAT128_2REG_P (fmode));
                  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
                }

              r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
              off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }

          /* If there were not enough FPRs to hold the argument, the rest
             usually goes into memory.  However, if the current position
             is still within the register parameter area, a portion may
             actually have to go into GPRs.

             Note that it may happen that the portion of the argument
             passed in the first "half" of the first GPR was already
             passed in the last FPR as well.

             For unnamed arguments, we already set up GPRs to cover the
             whole argument in rs6000_psave_function_arg, so there is
             nothing further to do at this point.  */
          fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
          if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
              && cum->nargs_prototype > 0)
            {
              static bool warned;

              machine_mode rmode = TARGET_32BIT ? SImode : DImode;
              int n_words = rs6000_arg_size (mode, type);

              align_words += fpr_words;
              n_words -= fpr_words;

              do
                {
                  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
                  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
                  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
                }
              while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

              if (!warned && warn_psabi)
                {
                  warned = true;
                  inform (input_location,
                          "the ABI of passing homogeneous float aggregates"
                          " has changed in GCC 5");
                }
            }

          return rs6000_finish_function_arg (mode, rvec, k);
        }
      else if (align_words < GP_ARG_NUM_REG)
        {
          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        }
      else
        return NULL_RTX;
    }
}
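
/* Worked example (hypothetical, 64-bit ELFv2): a named double argument
   with FPRs still available and a prototype in scope takes the
   USE_FP_FOR_ARG_P path and collapses to a plain (reg:DF fN) via
   rs6000_finish_function_arg; the PARALLEL with GPR/memory elements only
   shows up for unprototyped calls (cum->nargs_prototype <= 0) or in the
   -mxl-compat case past the register parameter area.  */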
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                          tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
         memory) as well as VRs, we do not use the partial bytes mechanism;
         instead, rs6000_function_arg will return a PARALLEL including a memory
         element as necessary.  Library support functions for IEEE 128-bit are
         assumed to not need the value passed both in GPRs and in vector
         registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
        ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
         (gprs or memory) as well as FPRs, we do not use the partial
         bytes mechanism; instead, rs6000_function_arg will return a
         PARALLEL including a memory element as necessary.  */
      if (type
          && (cum->nargs_prototype <= 0
              || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                  && TARGET_XL_COMPAT
                  && align_words >= GP_ARG_NUM_REG)))
        return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
        {
          /* Compute number of bytes / words passed in FPRs.  If there
             is still space available in the register parameter area
             *after* that amount, a part of the argument will be passed
             in GPRs.  In that case, the total amount passed in any
             registers is equal to the amount that would have been passed
             in GPRs if everything were passed there, so we fall back to
             the GPR code below to compute the appropriate value.  */
          int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
                     * MIN (8, GET_MODE_SIZE (elt_mode)));
          int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

          if (align_words + fpr_words < GP_ARG_NUM_REG)
            passed_in_gprs = true;
          else
            ret = fpr;
        }
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
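
/* Arithmetic sketch (hypothetical, 64-bit AIX): a long double (TFmode,
   n_fpreg == 2) arriving with only the last FPR free gets
   fpr == 1 * MIN (8, 16) == 8 bytes in FPRs; if the GPR area still has
   room after fpr_words, the GPR fallback above recomputes the value,
   otherwise ret == 8 is what common code uses to split the argument
   between the last FPR and memory.  */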
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                          machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector passed by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_pass_big_vectors = true;
        }
      return 1;
    }

  return 0;
}
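
/* Quick illustration of the cases above: under V.4 a struct argument
   arrives as a pointer, while 64-bit AIX/ELFv2 pass it by value; and a
   synthetic vector such as

     typedef int v8si __attribute__ ((vector_size (32)));

   exceeds the 16-byte (or 8-byte without the AltiVec ABI) limit and is
   passed by reference everywhere, with the one-time -Wpsabi warning.  */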
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
            || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
                         TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
        result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
           parm && parm != void_list_node;
           parm = TREE_CHAIN (parm))
        if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
          return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
        if (rs6000_parm_needs_stack (args_so_far, arg_type))
          return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

static int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
         a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
        reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
        reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
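
/* Summary of the values produced above: 64 bytes (eight doublewords) for
   64-bit AIX/Darwin, 32 bytes for their 32-bit variants, always 0 for
   V.4, and for ELFv2 either the full 64/32 bytes or 0 depending on
   whether the callee can receive everything in registers.  */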
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
        {
          if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
            tem = NULL_RTX;
          else
            tem = simplify_gen_subreg (reg_mode, x, BLKmode,
                                       i * GET_MODE_SIZE (reg_mode));
        }
      else
        tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                        tree type, int *pretend_size ATTRIBUTE_UNUSED,
                        int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
        {
          int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
          HOST_WIDE_INT offset = 0;

          /* Try to optimize the size of the varargs save area.
             The ABI requires that ap.reg_save_area is doubleword
             aligned, but we don't need to allocate space for all
             the bytes, only those to which we actually will save
             anything.  */
          if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
            gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
          if (TARGET_HARD_FLOAT
              && next_cum.fregno <= FP_ARG_V4_MAX_REG
              && cfun->va_list_fpr_size)
            {
              if (gpr_reg_num)
                fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
                           * UNITS_PER_FP_WORD;
              if (cfun->va_list_fpr_size
                  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
              else
                fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                            * UNITS_PER_FP_WORD;
            }
          if (gpr_reg_num)
            {
              offset = -((first_reg_offset * reg_size) & ~7);
              if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
                {
                  gpr_reg_num = cfun->va_list_gpr_size;
                  if (reg_size == 4 && (first_reg_offset & 1))
                    gpr_reg_num++;
                }
              gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
            }
          else if (fpr_size)
            offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
                       * UNITS_PER_FP_WORD
                     - (int) (GP_ARG_NUM_REG * reg_size);

          if (gpr_size + fpr_size)
            {
              rtx reg_save_area
                = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
              gcc_assert (GET_CODE (reg_save_area) == MEM);
              reg_save_area = XEXP (reg_save_area, 0);
              if (GET_CODE (reg_save_area) == PLUS)
                {
                  gcc_assert (XEXP (reg_save_area, 0)
                              == virtual_stack_vars_rtx);
                  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
                  offset += INTVAL (XEXP (reg_save_area, 1));
                }
              else
                gcc_assert (reg_save_area == virtual_stack_vars_rtx);
            }

          cfun->machine->varargs_save_offset = offset;
          save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
        }
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
        first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
        /* V4 va_list_gpr_size counts number of registers needed.  */
        n_gpr = cfun->va_list_gpr_size;
      else
        /* char * va_list instead counts number of bytes needed.  */
        n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
        nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
                         plus_constant (Pmode, save_area,
                                        first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
                                  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
                                               * UNITS_PER_FP_WORD);

      emit_jump_insn
        (gen_rtx_SET (pc_rtx,
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            gen_rtx_NE (VOIDmode, cr1,
                                                        const0_rtx),
                                            gen_rtx_LABEL_REF (VOIDmode, lab),
                                            pc_rtx)));

      for (nregs = 0;
           fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
           fregno++, off += UNITS_PER_FP_WORD, nregs++)
        {
          mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                             ? DFmode : SFmode,
                             plus_constant (Pmode, save_area, off));
          MEM_NOTRAP_P (mem) = 1;
          set_mem_alias_set (mem, set);
          set_mem_align (mem, GET_MODE_ALIGNMENT (
                           (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                           ? DFmode : SFmode));
          emit_move_insn (mem, gen_rtx_REG (
                            (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                            ? DFmode : SFmode, fregno));
        }

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
                          get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
                      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
                      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
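
/* The record built above matches the layout the SVR4 PowerPC ABI mandates;
   in C terms it is equivalent to

     typedef struct __va_list_tag
     {
       unsigned char gpr;          // next GPR slot
       unsigned char fpr;          // next FPR slot
       unsigned short reserved;    // padding, named to quiet -Wpadded
       void *overflow_arg_area;    // arguments that spilled to the stack
       void *reg_save_area;        // where the prologue dumped the regs
     } va_list[1];

   (the field comments are paraphrases, not ABI text).  */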
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
               GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
               FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
             HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
             words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
        rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
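
/* Sketch (hypothetical): for int f (int a, ...) under 32-bit V.4 with one
   named integer argument, the expansion above behaves roughly like the C

     ap->gpr = 1;                            // r3 consumed by A
     ap->fpr = 0;
     ap->overflow_arg_area = arg_ptr + 4;    // words * MIN_UNITS_PER_WORD
     ap->reg_save_area = frame + cfun->machine->varargs_save_offset;

   with the gpr/fpr stores elided when no va_arg ever reads them.  */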
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                        gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to
     add.  */

  if (((TARGET_MACHO
        && rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
        boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
        {
          tree t;
          /* This updates arg ptr by the amount that would be necessary
             to align the zero-sized (but not zero-alignment) item.  */
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
          gimplify_and_add (t, pre_p);

          t = fold_convert (sizetype, valist_tmp);
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_convert (TREE_TYPE (valist),
                                    fold_build2 (BIT_AND_EXPR, sizetype, t,
                                                 size_int (-boundary))));
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
          gimplify_and_add (t, pre_p);
        }
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
        {
          tree elem_type = TREE_TYPE (type);
          machine_mode elem_mode = TYPE_MODE (elem_type);
          int elem_size = GET_MODE_SIZE (elem_mode);

          if (elem_size < UNITS_PER_WORD)
            {
              tree real_part, imag_part;
              gimple_seq post = NULL;

              real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  &post);
              /* Copy the value into a temporary, lest the formal temporary
                 be reused out from under us.  */
              real_part = get_initialized_tmp_var (real_part, pre_p, &post);
              gimple_seq_add_seq (pre_p, post);

              imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  post_p);

              return build2 (COMPLEX_EXPR, type, real_part, imag_part);
            }
        }

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
        align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
        align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
         item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
        {
          regalign = 1;
          u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), n_reg - 1));
          u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
                      unshare_expr (reg), u);
        }
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
         reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
        {
          t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), 1));
          u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
        }

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
        t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
         FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
        t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
        t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
        {
          /* Ensure that we don't find any more args in regs.
             Alignment has taken care of the special cases.  */
          gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
        }
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
          > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
         aligned in memory in the saved registers, so copy via a
         temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
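
/* Register-counting sketch for the V.4 path above (hypothetical double
   argument): size == 8 gives n_reg == 1 in FPRs with sav_scale == 8 and
   sav_ofs == 32, so the emitted GIMPLE is roughly equivalent to

     if (ap->fpr >= 8) goto overflow;
     addr = ap->reg_save_area + 32 + ap->fpr++ * 8;

   where the 32-byte sav_ofs skips the eight saved GPR words.  */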
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
                 "internal error: builtin function %qs already processed",
                 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
         external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
         function as not reading global memory, but it can have arbitrary side
         effects.  If it is off, then assume the function is a const function.
         This mimics the ATTR_MATHFN_FPROUNDING attribute in
         builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
        {
          DECL_PURE_P (t) = 1;
          DECL_IS_NOVOPS (t) = 1;
          attr_string = ", fp, pure";
        }
      else
        {
          TREE_READONLY (t) = 1;
          attr_string = ", fp, const";
        }
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
             (int)code, name, attr_string);
}
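
/* Typical use (hypothetical identifiers): the builtin-init routines call
   this as e.g.

     def_builtin ("__builtin_altivec_foo", v4si_ftype_v4si_v4si,
                  ALTIVEC_BUILTIN_FOO);

   which registers the decl in rs6000_builtin_decls and marks it const,
   pure or fp according to the RS6000_BTC_* bits in
   rs6000_builtin_info[].attr.  */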
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

/* Simple no-argument operations: result = __builtin_darn_32 ().  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};

/* HTM builtins.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
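
/* How these tables work (sketch): rs6000-builtin.def expands every builtin
   through whichever RS6000_BUILTIN_<kind> macro is currently defined as
   non-empty, so each table above collects exactly one kind of builtin.
   A hypothetical two-operand entry

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_FOO, "__builtin_altivec_foo",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
                       CODE_FOR_foo_v4si)

   would contribute one { MASK, ICODE, NAME, ENUM } initializer to
   bdesc_2arg and be invisible to every other table.  */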
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
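/* Usage sketch, guarded out of the build: the vspltis* checks above mean
   the splat-immediate intrinsics only accept compile-time constants in
   the signed range -16..15.  */
#if 0
vector signed short ok  = vec_splat_s16 (-16);	/* accepted: -16..15 */
vector signed short bad = vec_splat_s16 (42);	/* rejected: "argument 1 must
						   be a 5-bit signed literal" */
#endif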
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfptstsfi_eq_dd
	   || icode == CODE_FOR_dfptstsfi_lt_dd
	   || icode == CODE_FOR_dfptstsfi_gt_dd
	   || icode == CODE_FOR_dfptstsfi_unordered_dd
	   || icode == CODE_FOR_dfptstsfi_eq_td
	   || icode == CODE_FOR_dfptstsfi_lt_td
	   || icode == CODE_FOR_dfptstsfi_gt_td
	   || icode == CODE_FOR_dfptstsfi_unordered_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
	{
	  error ("argument 1 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_xststdcqp
	   || icode == CODE_FOR_xststdcdp
	   || icode == CODE_FOR_xststdcsp
	   || icode == CODE_FOR_xvtstdcdp
	   || icode == CODE_FOR_xvtstdcsp)
    {
      /* Only allow 7-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
	{
	  error ("argument 2 must be a 7-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_unpackv1ti
	   || icode == CODE_FOR_unpackkf
	   || icode == CODE_FOR_unpacktf
	   || icode == CODE_FOR_unpackif
	   || icode == CODE_FOR_unpacktd)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
	{
	  error ("argument 2 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = SImode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of %qs must be a constant",
	     "__builtin_altivec_predicate");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  /* Note that for many of the relevant operations (e.g. cmpne or
     cmpeq) with float or double operands, it makes more sense for the
     mode of the allocated scratch register to select a vector of
     integer.  But the choice to copy the mode of operand 0 was made
     long ago and there are no plans to change it.  */
  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */
  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_altivec_predicate");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    case E_V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						     gen_rtvec_v (16, perm)));
}
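/* Host-side sketch, guarded out of the build: the four tables above all
   follow one rule -- reverse the bytes within each element slot while
   keeping slot order.  Read with big-endian byte numbering against
   elements stored in little-endian order, that is exactly an element
   reversal.  The helper name is hypothetical.  */
#if 0
static void
make_swap_selector (unsigned elt_bytes, unsigned sel[16])
{
  /* elt_bytes = 8, 4, 2, 1 reproduces swap2, swap4, swap8, swap16.  */
  for (unsigned i = 0; i < 16; i++)
    sel[i] = (i / elt_bytes) * elt_bytes + (elt_bytes - 1 - i % elt_bytes);
}
#endif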
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-
   reversing permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (op0, vperm));
}
/* Generate code for a "stvxl" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (gen_rtx_SET (op0, stvx));
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  /* For LVX, express the RTL accurately by ANDing the address with -16.
     LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_lvx_v2df_2op
      || icode == CODE_FOR_altivec_lvx_v2di_2op
      || icode == CODE_FOR_altivec_lvx_v4sf_2op
      || icode == CODE_FOR_altivec_lvx_v4si_2op
      || icode == CODE_FOR_altivec_lvx_v8hi_2op
      || icode == CODE_FOR_altivec_lvx_v16qi_2op)
    {
      rtx rawaddr;
      if (op0 == const0_rtx)
	rawaddr = op1;
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
	}
      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);

      /* For -maltivec=be, emit the load and follow it up with a
	 permute to swap the elements.  */
      if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
	{
	  rtx temp = gen_reg_rtx (tmode);
	  emit_insn (gen_rtx_SET (temp, addr));

	  rtx sel = swap_selector_for_mode (tmode);
	  rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
				      UNSPEC_VPERM);
	  emit_insn (gen_rtx_SET (target, vperm));
	}
      else
	emit_insn (gen_rtx_SET (target, addr));
    }
  else
    {
      if (op0 == const0_rtx)
	addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			      gen_rtx_PLUS (Pmode, op1, op0));
	}

      pat = GEN_FCN (icode) (target, addr);
      if (! pat)
	return 0;
      emit_insn (pat);
    }

  return target;
}
static rtx
altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target,
			      bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			  gen_rtx_PLUS (Pmode, op1, op0));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* Reverse element order of elements if in LE mode.  */
  if (!VECTOR_ELT_ORDER_BIG)
    {
      rtx sel = swap_selector_for_mode (tmode);
      rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
				  UNSPEC_VPERM);
      emit_insn (gen_rtx_SET (target, vperm));
    }

  return target;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;
  machine_mode mode2 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return NULL_RTX;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return NULL_RTX;

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (op0, op1, op2);
  if (pat)
    emit_insn (pat);

  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr, rawaddr;
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode smode = insn_data[icode].operand[1].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  op2 = copy_to_mode_reg (mode2, op2);

  /* For STVX, express the RTL accurately by ANDing the address with -16.
     STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_stvx_v2df_2op
      || icode == CODE_FOR_altivec_stvx_v2di_2op
      || icode == CODE_FOR_altivec_stvx_v4sf_2op
      || icode == CODE_FOR_altivec_stvx_v4si_2op
      || icode == CODE_FOR_altivec_stvx_v8hi_2op
      || icode == CODE_FOR_altivec_stvx_v16qi_2op)
    {
      if (op1 == const0_rtx)
	rawaddr = op2;
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
	}

      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (tmode, addr);

      op0 = copy_to_mode_reg (tmode, op0);

      /* For -maltivec=be, emit a permute to swap the elements, followed
	 by the store.  */
      if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
	{
	  rtx temp = gen_reg_rtx (tmode);
	  rtx sel = swap_selector_for_mode (tmode);
	  rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
				      UNSPEC_VPERM);
	  emit_insn (gen_rtx_SET (temp, vperm));
	  emit_insn (gen_rtx_SET (addr, temp));
	}
      else
	emit_insn (gen_rtx_SET (addr, op0));
    }
  else
    {
      if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
	op0 = copy_to_mode_reg (smode, op0);

      if (op1 == const0_rtx)
	addr = gen_rtx_MEM (tmode, op2);
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
	}

      pat = GEN_FCN (icode) (addr, op0);
      if (pat)
	emit_insn (pat);
    }

  return NULL_RTX;
}
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
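/* Usage sketch, guarded out of the build: the two helpers above pair each
   HTM GET/SET builtin with its SPR number and register, which is how the
   htmintrin.h accessors reach the right special register.  The variable
   handler_addr is hypothetical.  */
#if 0
unsigned long texasr = __builtin_get_texasr ();	/* read failure status */
__builtin_set_tfhar (handler_addr);		/* handler_addr: hypothetical */
#endif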
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  if (!TARGET_POWERPC64
      && (fcode == HTM_BUILTIN_TABORTDC
	  || fcode == HTM_BUILTIN_TABORTDCI))
    {
      size_t uns_fcode = (size_t)fcode;
      const char *name = rs6000_builtin_info[uns_fcode].name;
      error ("builtin %qs is only valid in 64-bit mode", name);
      return const0_rtx;
    }

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;
	const struct insn_operand_data *insn_op;
	bool uses_spr = (attr & RS6000_BTC_SPR);
	rtx cr = NULL_RTX;

	if (uses_spr)
	  icode = rs6000_htm_spr_icode (nonvoid);
	insn_op = &insn_data[icode].operand[0];

	if (nonvoid)
	  {
	    machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| (uses_spr && !(*insn_op->predicate) (target, tmode)))
	      target = gen_reg_rtx (tmode);
	    if (uses_spr)
	      op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	{
	  if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	    return const0_rtx;

	  insn_op = &insn_data[icode].operand[nopnds];

	  op[nopnds] = expand_normal (arg);

	  if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	    {
	      if (!strcmp (insn_op->constraint, "n"))
		{
		  int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		  if (!CONST_INT_P (op[nopnds]))
		    error ("argument %d must be an unsigned literal", arg_num);
		  else
		    error ("argument %d is an unsigned literal that is "
			   "out of range", arg_num);
		  return const0_rtx;
		}
	      op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	    }

	  nopnds++;
	}

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (uses_spr)
	  {
	    machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
	    op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
	  }
	/* If this builtin accesses a CR, then pass in a scratch
	   CR as the last operand.  */
	else if (attr & RS6000_BTC_CR)
	  {
	    cr = gen_reg_rtx (CCmode);
	    op[nopnds++] = cr;
	  }

	if (flag_checking)
	  {
	    int expected_nopnds = 0;
	    if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	      expected_nopnds = 1;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	      expected_nopnds = 2;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	      expected_nopnds = 3;
	    if (!(attr & RS6000_BTC_VOID))
	      expected_nopnds += 1;
	    if (uses_spr)
	      expected_nopnds += 2;

	    gcc_assert (nopnds == expected_nopnds
			&& nopnds <= MAX_HTM_OPERANDS);
	  }

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	if (attr & RS6000_BTC_CR)
	  {
	    if (fcode == HTM_BUILTIN_TBEGIN)
	      {
		/* Emit code to set TARGET to true or false depending on
		   whether the tbegin. instruction successfully or failed
		   to start a transaction.  We do this by placing the 1's
		   complement of CR's EQ bit into TARGET.  */
		rtx scratch = gen_reg_rtx (SImode);
		emit_insn (gen_rtx_SET (scratch,
					gen_rtx_EQ (SImode, cr,
						    const0_rtx)));
		emit_insn (gen_rtx_SET (target,
					gen_rtx_XOR (SImode, scratch,
						     GEN_INT (1))));
	      }
	    else
	      {
		/* Emit code to copy the 4-bit condition register field
		   CR into the least significant end of register TARGET.  */
		rtx scratch1 = gen_reg_rtx (SImode);
		rtx scratch2 = gen_reg_rtx (SImode);
		rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
		emit_insn (gen_movcc (subreg, cr));
		emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
		emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
	      }
	  }

	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the CPU builtin in FCODE and store the result in TARGET.  */

static rtx
cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
		    rtx target)
{
  /* __builtin_cpu_init () is a nop, so expand to nothing.  */
  if (fcode == RS6000_BUILTIN_CPU_INIT)
    return const0_rtx;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
  /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
     to a STRING_CST.  */
  if (TREE_CODE (arg) == ARRAY_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
      && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
      && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
    arg = TREE_OPERAND (arg, 0);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("builtin %qs only accepts a string argument",
	     rs6000_builtin_info[(size_t) fcode].name);
      return const0_rtx;
    }

  if (fcode == RS6000_BUILTIN_CPU_IS)
    {
      const char *cpu = TREE_STRING_POINTER (arg);
      rtx cpuid = NULL_RTX;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
	if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
	  {
	    /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM.  */
	    cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
	    break;
	  }
      if (cpuid == NULL_RTX)
	{
	  /* Invalid CPU argument.  */
	  error ("cpu %qs is an invalid argument to builtin %qs",
		 cpu, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx platform = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (TCB_PLATFORM_OFFSET)));
      emit_move_insn (platform, tcbmem);
      emit_insn (gen_eqsi3 (target, platform, cpuid));
    }
  else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
    {
      const char *hwcap = TREE_STRING_POINTER (arg);
      rtx mask = NULL_RTX;
      int hwcap_offset;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
	if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
	  {
	    mask = GEN_INT (cpu_supports_info[i].mask);
	    hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
	    break;
	  }
      if (mask == NULL_RTX)
	{
	  /* Invalid HWCAP argument.  */
	  error ("%s %qs is an invalid argument to builtin %qs",
		 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx tcb_hwcap = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (hwcap_offset)));
      emit_move_insn (tcb_hwcap, tcbmem);
      rtx scratch1 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
      rtx scratch2 = gen_reg_rtx (SImode);
      emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
      emit_insn (gen_rtx_SET (target,
			      gen_rtx_XOR (SImode, scratch2, const1_rtx)));
    }
  else
    gcc_unreachable ();

  /* Record that we have expanded a CPU builtin, so that we can later
     emit a reference to the special symbol exported by LIBC to ensure we
     do not link against an old LIBC that doesn't support this feature.  */
  cpu_builtin_p = true;

#else
  warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
	   "capability bits", rs6000_builtin_info[(size_t) fcode].name);

  /* For old LIBCs, always return FALSE.  */
  emit_move_insn (target, GEN_INT (0));
#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */

  return target;
}
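/* Usage sketch, guarded out of the build: with glibc 2.23 or newer, both
   builtins compile down to a single load from the TCB plus a compare or
   mask, as expanded above.  The helper names are hypothetical.  */
#if 0
if (__builtin_cpu_is ("power9"))
  use_power9_path ();			/* hypothetical helper */
else if (__builtin_cpu_supports ("arch_2_07"))
  use_power8_path ();			/* hypothetical helper */
#endif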
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v2df
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxpermdi_v2df_be
	   || icode == CODE_FOR_vsx_xxpermdi_v2di_be
	   || icode == CODE_FOR_vsx_xxpermdi_v1ti
	   || icode == CODE_FOR_vsx_xxpermdi_v4sf
	   || icode == CODE_FOR_vsx_xxpermdi_v4si
	   || icode == CODE_FOR_vsx_xxpermdi_v8hi
	   || icode == CODE_FOR_vsx_xxpermdi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di
	   || icode == CODE_FOR_bcdadd
	   || icode == CODE_FOR_bcdadd_lt
	   || icode == CODE_FOR_bcdadd_eq
	   || icode == CODE_FOR_bcdadd_gt
	   || icode == CODE_FOR_bcdsub
	   || icode == CODE_FOR_bcdsub_lt
	   || icode == CODE_FOR_bcdsub_eq
	   || icode == CODE_FOR_bcdsub_gt)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_denbcd_dd
	   || icode == CODE_FOR_dfp_denbcd_td)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x1)
	{
	  error ("argument 1 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_dscli_dd
	   || icode == CODE_FOR_dfp_dscli_td
	   || icode == CODE_FOR_dfp_dscri_dd
	   || icode == CODE_FOR_dfp_dscri_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x3f)
	{
	  error ("argument 2 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
	{
	  error ("argument 2 must be 0 or 1");
	  return CONST0_RTX (tmode);
	}

      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || wi::geu_p (wi::to_wide (arg2), 16))
	{
	  error ("argument 3 must be in the range 0..15");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_load_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_store_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal",
		   d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
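/* Usage sketch, guarded out of the build: the internal per-mode vector
   initializers expanded above take one scalar argument per element.  */
#if 0
vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
#endif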
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
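/* Usage sketch, guarded out of the build: constant selectors to the
   vec_set/vec_ext expanders below are validated against the element
   count, so a 4-element vector accepts selectors 0..3; an out-of-range
   constant draws the "selector must be an integer constant" error.  */
#if 0
vector int v;
vector int w = vec_insert (42, v, 3);	/* ok: 0..3 for vector int */
#endif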
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  machine_mode tmode, mode0;
  tree arg0, arg1;
  rtx op0;
  rtx op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  /* Call get_element_number to validate arg1 if it is a constant.  */
  if (TREE_CODE (arg1) == INTEGER_CST)
    (void) get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, op1);

  return target;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, pat;
  machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
    case ALTIVEC_BUILTIN_STVX:
    case ALTIVEC_BUILTIN_STVX_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
    case ALTIVEC_BUILTIN_STVXL_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
    case ALTIVEC_BUILTIN_STVXL:
    case ALTIVEC_BUILTIN_STVXL_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
    case ALTIVEC_BUILTIN_STVXL_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
    case ALTIVEC_BUILTIN_STVXL_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case P9V_BUILTIN_STXVL:
      return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);

    case P9V_BUILTIN_XST_LEN_R:
      return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);

    case VSX_BUILTIN_STXVD2X_V1TI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting store, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_ST_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
			       : CODE_FOR_vsx_st_elemrev_v2df);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
			       : CODE_FOR_vsx_st_elemrev_v2di);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
			       : CODE_FOR_vsx_st_elemrev_v4sf);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
			       : CODE_FOR_vsx_st_elemrev_v4si);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
			       : CODE_FOR_vsx_st_elemrev_v8hi);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
			       : CODE_FOR_vsx_st_elemrev_v16qi);
	return altivec_expand_stv_builtin (code, exp);
      }
    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to %qs must be a 2-bit unsigned literal", "dss");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
    case VSX_BUILTIN_VEC_INIT_V1TI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
    case VSX_BUILTIN_VEC_SET_V1TI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
    case VSX_BUILTIN_VEC_EXT_V1TI:
      return altivec_expand_vec_ext_builtin (exp, target);

    case P9V_BUILTIN_VEXTRACT4B:
    case P9V_BUILTIN_VEC_VEXTRACT4B:
      arg1 = CALL_EXPR_ARG (exp, 1);
      STRIP_NOPS (arg1);

      /* Generate a normal call if it is invalid.  */
      if (arg1 == error_mark_node)
	return expand_call (exp, target, false);

      if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
	{
	  error ("second argument to %qs must be 0..12", "vec_vextract4b");
	  return expand_call (exp, target, false);
	}
      break;

    case P9V_BUILTIN_VINSERT4B:
    case P9V_BUILTIN_VINSERT4B_DI:
    case P9V_BUILTIN_VEC_VINSERT4B:
      arg2 = CALL_EXPR_ARG (exp, 2);
      STRIP_NOPS (arg2);

      /* Generate a normal call if it is invalid.  */
      if (arg2 == error_mark_node)
	return expand_call (exp, target, false);

      if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
	{
	  error ("third argument to %qs must be 0..12", "vec_vinsert4b");
	  return expand_call (exp, target, false);
	}
      break;

    default:
      break;
      /* Fall through.  */
    }
  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);
15718 /* LV* are funky. We initialized them differently. */
15721 case ALTIVEC_BUILTIN_LVSL
:
15722 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
15723 exp
, target
, false);
15724 case ALTIVEC_BUILTIN_LVSR
:
15725 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr
,
15726 exp
, target
, false);
15727 case ALTIVEC_BUILTIN_LVEBX
:
15728 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx
,
15729 exp
, target
, false);
15730 case ALTIVEC_BUILTIN_LVEHX
:
15731 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx
,
15732 exp
, target
, false);
15733 case ALTIVEC_BUILTIN_LVEWX
:
15734 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx
,
15735 exp
, target
, false);
15736 case ALTIVEC_BUILTIN_LVXL_V2DF
:
15737 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df
,
15738 exp
, target
, false);
15739 case ALTIVEC_BUILTIN_LVXL_V2DI
:
15740 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di
,
15741 exp
, target
, false);
15742 case ALTIVEC_BUILTIN_LVXL_V4SF
:
15743 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf
,
15744 exp
, target
, false);
15745 case ALTIVEC_BUILTIN_LVXL
:
15746 case ALTIVEC_BUILTIN_LVXL_V4SI
:
15747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si
,
15748 exp
, target
, false);
15749 case ALTIVEC_BUILTIN_LVXL_V8HI
:
15750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi
,
15751 exp
, target
, false);
15752 case ALTIVEC_BUILTIN_LVXL_V16QI
:
15753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi
,
15754 exp
, target
, false);
15755 case ALTIVEC_BUILTIN_LVX_V2DF
:
15756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op
,
15757 exp
, target
, false);
15758 case ALTIVEC_BUILTIN_LVX_V2DI
:
15759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op
,
15760 exp
, target
, false);
15761 case ALTIVEC_BUILTIN_LVX_V4SF
:
15762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op
,
15763 exp
, target
, false);
15764 case ALTIVEC_BUILTIN_LVX
:
15765 case ALTIVEC_BUILTIN_LVX_V4SI
:
15766 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op
,
15767 exp
, target
, false);
15768 case ALTIVEC_BUILTIN_LVX_V8HI
:
15769 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op
,
15770 exp
, target
, false);
15771 case ALTIVEC_BUILTIN_LVX_V16QI
:
15772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op
,
15773 exp
, target
, false);
15774 case ALTIVEC_BUILTIN_LVLX
:
15775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx
,
15776 exp
, target
, true);
15777 case ALTIVEC_BUILTIN_LVLXL
:
15778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl
,
15779 exp
, target
, true);
15780 case ALTIVEC_BUILTIN_LVRX
:
15781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx
,
15782 exp
, target
, true);
15783 case ALTIVEC_BUILTIN_LVRXL
:
15784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl
,
15785 exp
, target
, true);
15786 case VSX_BUILTIN_LXVD2X_V1TI
:
15787 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti
,
15788 exp
, target
, false);
15789 case VSX_BUILTIN_LXVD2X_V2DF
:
15790 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df
,
15791 exp
, target
, false);
15792 case VSX_BUILTIN_LXVD2X_V2DI
:
15793 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di
,
15794 exp
, target
, false);
15795 case VSX_BUILTIN_LXVW4X_V4SF
:
15796 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf
,
15797 exp
, target
, false);
15798 case VSX_BUILTIN_LXVW4X_V4SI
:
15799 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si
,
15800 exp
, target
, false);
15801 case VSX_BUILTIN_LXVW4X_V8HI
:
15802 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi
,
15803 exp
, target
, false);
15804 case VSX_BUILTIN_LXVW4X_V16QI
:
15805 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi
,
15806 exp
, target
, false);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
			       : CODE_FOR_vsx_ld_elemrev_v2df);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
			       : CODE_FOR_vsx_ld_elemrev_v2di);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
			       : CODE_FOR_vsx_ld_elemrev_v4sf);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
			       : CODE_FOR_vsx_ld_elemrev_v4si);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
			       : CODE_FOR_vsx_ld_elemrev_v8hi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
			       : CODE_FOR_vsx_ld_elemrev_v16qi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
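      /* As an illustration (hypothetical user code, assuming the vec_xl
	 intrinsic resolves to one of the LD_ELEMREV built-ins above):

	   vector int
	   load_v4si (const int *p)
	   {
	     return vec_xl (0, p);
	   }

	 must see the elements of *p in array order on little endian as
	 well, which is why the element-reversing expander is selected
	 there, while big endian can use any unaligned-capable vector
	 load.  */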
      /* Fall through.  */

      /* XL_BE  We initialized them to always load in big endian order.  */
    case VSX_BUILTIN_XL_BE_V2DI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2di;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;
    case VSX_BUILTIN_XL_BE_V4SI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4si;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;
    case VSX_BUILTIN_XL_BE_V8HI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v8hi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;
    case VSX_BUILTIN_XL_BE_V16QI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v16qi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;
    case VSX_BUILTIN_XL_BE_V2DF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2df;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;
    case VSX_BUILTIN_XL_BE_V4SF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4sf;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      break;

    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */

static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of %s must be a constant",
	     "__builtin_paired_predicate");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_paired_predicate");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mpaired");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
	   == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-float", "-mlong-double-128");
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
    error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
	   name);
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else
    error ("builtin function %qs is not supported with the current options",
	   name);
}
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
		     tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn_code
	= (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
      switch (fn_code)
	{
	case RS6000_BUILTIN_NANQ:
	case RS6000_BUILTIN_NANSQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    const char *str = c_getstr (*args);
	    int quiet = fn_code == RS6000_BUILTIN_NANQ;
	    REAL_VALUE_TYPE real;

	    if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
	      return build_real (type, real);
	    return NULL_TREE;
	  }
	case RS6000_BUILTIN_INFQ:
	case RS6000_BUILTIN_HUGE_VALQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    REAL_VALUE_TYPE inf;
	    real_inf (&inf);
	    return build_real (type, inf);
	  }
	default:
	  break;
	}
    }
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
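
/* As an illustration (hypothetical user code): with the folder above, a
   call such as

     __float128 x = __builtin_nanq ("");

   can be folded at compile time into a quiet-NaN REAL_CST of the built-in's
   return type, and __builtin_infq () likewise folds to an infinity
   constant.  */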
/* Helper function to sort out which built-ins may be valid without having
   a LHS.  */

static bool
rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
{
  switch (fn_code)
    {
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return true;
    default:
      return false;
    }
}
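
/* As an illustration (hypothetical user code): a statement such as

     vec_st (v, 0, p);

   produces a call whose value is unused, so the call has no LHS.  The STVX
   flavors above are stores and must still be folded, which is why they are
   allowed through without a LHS.  */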
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

static bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
			  ? get_insn_name ((int) icode)
			  : "nothing";

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
	     fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Prevent gimple folding for code that does not have a LHS, unless it is
     allowed per the rs6000_builtin_valid_without_lhs helper function.  */
  if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
    return false;

  switch (fn_code)
    {
    /* Flavors of vec_add.  We deliberately don't expand
       P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
       TImode, resulting in much poorer code generation.  */
    case ALTIVEC_BUILTIN_VADDUBM:
    case ALTIVEC_BUILTIN_VADDUHM:
    case ALTIVEC_BUILTIN_VADDUWM:
    case P8V_BUILTIN_VADDUDM:
    case ALTIVEC_BUILTIN_VADDFP:
    case VSX_BUILTIN_XVADDDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
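      /* As an illustration (hypothetical user code): a source fragment
	 such as

	   vector float a, b;
	   vector float c = vec_add (a, b);

	 arrives here as a call to the VADDFP built-in and is replaced by a
	 plain PLUS_EXPR on the two vector operands, which the middle end
	 can then optimize like any other vector addition.  */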
    /* Flavors of vec_sub.  We deliberately don't expand
       P8V_BUILTIN_VSUBUQM.  */
    case ALTIVEC_BUILTIN_VSUBUBM:
    case ALTIVEC_BUILTIN_VSUBUHM:
    case ALTIVEC_BUILTIN_VSUBUWM:
    case P8V_BUILTIN_VSUBUDM:
    case ALTIVEC_BUILTIN_VSUBFP:
    case VSX_BUILTIN_XVSUBDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    case VSX_BUILTIN_XVMULSP:
    case VSX_BUILTIN_XVMULDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Even element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULESB:
    case ALTIVEC_BUILTIN_VMULESH:
    case ALTIVEC_BUILTIN_VMULESW:
    /* Even element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g
	  = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Odd element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULOSB:
    case ALTIVEC_BUILTIN_VMULOSH:
    case ALTIVEC_BUILTIN_VMULOSW:
    /* Odd element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g
	  = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_div (Integer).  */
    case VSX_BUILTIN_DIV_V2DI:
    case VSX_BUILTIN_UDIV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_div (Float).  */
    case VSX_BUILTIN_XVDIVSP:
    case VSX_BUILTIN_XVDIVDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
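      /* Note (illustrative): vec_andc (a, b) computes a & ~b, so the
	 folding above emits two statements, roughly

	   temp = ~b;
	   lhs = a & temp;

	 inserting the BIT_NOT_EXPR before the call and replacing the call
	 itself with the BIT_AND_EXPR.  */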
    /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple_seq stmts = NULL;
	/* Convert arg0 to unsigned.  */
	tree arg0_unsigned
	  = gimple_build (&stmts, VIEW_CONVERT_EXPR,
			  unsigned_type_for (TREE_TYPE (arg0)), arg0);
	tree res
	  = gimple_build (&stmts, RSHIFT_EXPR,
			  TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
	/* Convert result back to the lhs type.  */
	res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	update_call_from_tree (gsi, res);
	return true;
      }
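      /* Note (illustrative): vsr{b,h,w,d} is a logical (zero-filling)
	 shift, while RSHIFT_EXPR on a signed vector type would be
	 arithmetic.  Converting arg0 to the corresponding unsigned vector
	 type first, roughly

	   u = VIEW_CONVERT_EXPR <unsigned vector> (arg0);
	   u = u >> arg1;
	   lhs = VIEW_CONVERT_EXPR <original type> (u);

	 keeps the shift logical regardless of the signedness of the
	 operand.  */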
    /* Vector loads.  */
    case ALTIVEC_BUILTIN_LVX_V16QI:
    case ALTIVEC_BUILTIN_LVX_V8HI:
    case ALTIVEC_BUILTIN_LVX_V4SI:
    case ALTIVEC_BUILTIN_LVX_V4SF:
    case ALTIVEC_BUILTIN_LVX_V2DI:
    case ALTIVEC_BUILTIN_LVX_V2DF:
      {
	arg0 = gimple_call_arg (stmt, 0);  // offset
	arg1 = gimple_call_arg (stmt, 1);  // address
	/* Do not fold for -maltivec=be on LE targets.  */
	if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
	  return false;
	lhs = gimple_call_lhs (stmt);
	location_t loc = gimple_location (stmt);
	/* Since arg1 may be cast to a different type, just use ptr_type_node
	   here instead of trying to enforce TBAA on pointer types.  */
	tree arg1_type = ptr_type_node;
	tree lhs_type = TREE_TYPE (lhs);
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg0.  The resulting type will match
	   the type of arg1.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg1_type, arg1, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg1_type, temp_addr,
					  build_int_cst (arg1_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
	   take an offset, but since we've already incorporated the offset
	   above, here we just pass in a zero.  */
	gimple *g;
	g = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
					      build_int_cst (arg1_type, 0)));
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }
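      /* Note (illustrative): for vec_ld (off, p) the sequence built above
	 is roughly

	   addr_1 = (sizetype) off + p;
	   addr_2 = addr_1 & -16;
	   lhs = MEM[(vector type *) addr_2];

	 making the 16-byte address masking performed by the lvx
	 instruction visible to the middle end.  */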
    /* Vector stores.  */
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      {
	/* Do not fold for -maltivec=be on LE targets.  */
	if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
	  return false;
	arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
	arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
	tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
	location_t loc = gimple_location (stmt);
	tree arg0_type = TREE_TYPE (arg0);
	/* Use ptr_type_node (no TBAA) for the arg2_type.
	   FIXME: (Richard)  "A proper fix would be to transition this type as
	   seen from the frontend to GIMPLE, for example in a similar way we
	   do for MEM_REFs by piggy-backing that on an extra argument, a
	   constant zero pointer of the alias pointer type to use (which would
	   also serve as a type indicator of the store itself).  I'd use a
	   target specific internal function for this (not sure if we can have
	   those target specific, but I guess if it's folded away then that's
	   fine) and get away with the overload set."  */
	tree arg2_type = ptr_type_node;
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg0.  The resulting type will match
	   the type of arg2.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg2_type, arg2, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg2_type, temp_addr,
					  build_int_cst (arg2_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* The desired gimple result should be similar to:
	   MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
	gimple *g;
	g = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
					 build_int_cst (arg2_type, 0)), arg0);
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }

    default:
      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
		 fn_code, fn_name1, fn_name2);
      break;
    }

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
			   ? get_insn_name ((int) icode)
			   : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int) icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si),
					  exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
					   target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
					    ? CODE_FOR_rs6000_mftb_di
					    : CODE_FOR_rs6000_mftb_si),
					   target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);
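
    /* As an illustration (hypothetical user code): the CPU built-ins
       expanded here support run-time dispatch such as

	 __builtin_cpu_init ();
	 if (__builtin_cpu_is ("power9")
	     || __builtin_cpu_supports ("arch_3_00"))
	   use_power9_path ();

       where use_power9_path stands in for the caller's own function.  */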
    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
		     : (int) CODE_FOR_altivec_lvsl_direct);
	machine_mode tmode = insn_data[icode].operand[0].mode;
	machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
	      || attr == RS6000_BTC_BINARY
	      || attr == RS6000_BTC_TERNARY
	      || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (d->icode, target);

  gcc_unreachable ();
}
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
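
/* As an illustration, the helper above is used below as, e.g.,

     V4SI_type_node = rs6000_vector_type ("__vector signed int",
					  intSI_type_node, 4);

   The variant copy presumably keeps the canonical V4SI vector type itself
   anonymous, so that other vector types with the same layout do not pick
   up the AltiVec name.  */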
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"	 : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
				       : "__vector long long",
				       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
				       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
				       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
					intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
						 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
						unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
						unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
						? "__vector unsigned long"
						: "__vector unsigned long long",
						unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
						TYPE_QUAL_CONST));

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
					   intTI_type_node, 1);
      unsigned_V1TI_type_node
	= rs6000_vector_type ("__vector unsigned __int128",
			      unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;

  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM double-double
     format that uses a pair of doubles, depending on the switches and
     defaults.

     If we don't support for either 128-bit IBM double double or IEEE 128-bit
     floating point, we need make sure the type is non-zero or else self-test
     fails during bootstrap.

     We don't register a built-in type for __ibm128 if the type is the same as
     long double.  Instead we add a #define for __ibm128 in
     rs6000_cpu_cpp_builtins to long double.

     For IEEE 128-bit floating point, always create the type __ieee128.  If the
     user used -mfloat128, rs6000-c.c will create a define from __float128 to
     __ieee128.  */
  if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
    {
      ibm128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ibm128_float_type_node, IFmode);
      layout_type (ibm128_float_type_node);

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
					      "__ibm128");
    }
  else
    ibm128_float_type_node = long_double_type_node;

  if (TARGET_FLOAT128_TYPE)
    {
      ieee128_float_type_node = float128_type_node;
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
					      "__ieee128");
    }
  else
    ieee128_float_type_node = long_double_type_node;
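
  /* As an illustration (hypothetical user code, requires the corresponding
     options such as -mfloat128): after this setup both 128-bit formats can
     be named directly,

       __ibm128  d = 1.0;
       __ieee128 q = 1.0;

     independently of which format long double itself uses.  */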
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
					     bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
					    bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
					    bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
					    ? "__vector __bool long"
					    : "__vector __bool long long",
					    bool_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
					     pixel_type_node, 8);

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = build_function_type_list (ieee128_float_type_node,
				    const_str_type_node, NULL_TREE);
  def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
  def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);

  ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
  def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
  def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
				    intSI_type_node, double_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);

  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF &&
      (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							    long_integer_type_node,
							    pcfloat_type_node,
							    NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int) i, get_insn_name (d->icode), (int) d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DI_type_node,
				V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);

  tree void_ftype_v16qi_pvoid_long
    = build_function_type_list (void_type_node,
				V16QI_type_node, pvoid_type_node,
				long_integer_type_node, NULL_TREE);

  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17441 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid
,
17442 VSX_BUILTIN_LXVD2X_V2DF
);
17443 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid
,
17444 VSX_BUILTIN_LXVD2X_V2DI
);
17445 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid
,
17446 VSX_BUILTIN_LXVW4X_V4SF
);
17447 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid
,
17448 VSX_BUILTIN_LXVW4X_V4SI
);
17449 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid
,
17450 VSX_BUILTIN_LXVW4X_V8HI
);
17451 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid
,
17452 VSX_BUILTIN_LXVW4X_V16QI
);
17453 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid
,
17454 VSX_BUILTIN_STXVD2X_V2DF
);
17455 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid
,
17456 VSX_BUILTIN_STXVD2X_V2DI
);
17457 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid
,
17458 VSX_BUILTIN_STXVW4X_V4SF
);
17459 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid
,
17460 VSX_BUILTIN_STXVW4X_V4SI
);
17461 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid
,
17462 VSX_BUILTIN_STXVW4X_V8HI
);
17463 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid
,
17464 VSX_BUILTIN_STXVW4X_V16QI
);
17466 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid
,
17467 VSX_BUILTIN_LD_ELEMREV_V2DF
);
17468 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid
,
17469 VSX_BUILTIN_LD_ELEMREV_V2DI
);
17470 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid
,
17471 VSX_BUILTIN_LD_ELEMREV_V4SF
);
17472 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid
,
17473 VSX_BUILTIN_LD_ELEMREV_V4SI
);
17474 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid
,
17475 VSX_BUILTIN_ST_ELEMREV_V2DF
);
17476 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid
,
17477 VSX_BUILTIN_ST_ELEMREV_V2DI
);
17478 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid
,
17479 VSX_BUILTIN_ST_ELEMREV_V4SF
);
17480 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid
,
17481 VSX_BUILTIN_ST_ELEMREV_V4SI
);
17483 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid
,
17484 VSX_BUILTIN_XL_BE_V8HI
);
17485 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid
,
17486 VSX_BUILTIN_XL_BE_V4SI
);
17487 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid
,
17488 VSX_BUILTIN_XL_BE_V2DI
);
17489 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid
,
17490 VSX_BUILTIN_XL_BE_V4SF
);
17491 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid
,
17492 VSX_BUILTIN_XL_BE_V2DF
);
17493 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid
,
17494 VSX_BUILTIN_XL_BE_V16QI
);
17496 if (TARGET_P9_VECTOR
)
17498 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid
,
17499 VSX_BUILTIN_LD_ELEMREV_V8HI
);
17500 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid
,
17501 VSX_BUILTIN_LD_ELEMREV_V16QI
);
17502 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17503 void_ftype_v8hi_long_pvoid
, VSX_BUILTIN_ST_ELEMREV_V8HI
);
17504 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17505 void_ftype_v16qi_long_pvoid
, VSX_BUILTIN_ST_ELEMREV_V16QI
);
17509 rs6000_builtin_decls
[(int) VSX_BUILTIN_LD_ELEMREV_V8HI
]
17510 = rs6000_builtin_decls
[(int) VSX_BUILTIN_LXVW4X_V8HI
];
17511 rs6000_builtin_decls
[(int) VSX_BUILTIN_LD_ELEMREV_V16QI
]
17512 = rs6000_builtin_decls
[(int) VSX_BUILTIN_LXVW4X_V16QI
];
17513 rs6000_builtin_decls
[(int) VSX_BUILTIN_ST_ELEMREV_V8HI
]
17514 = rs6000_builtin_decls
[(int) VSX_BUILTIN_STXVW4X_V8HI
];
17515 rs6000_builtin_decls
[(int) VSX_BUILTIN_ST_ELEMREV_V16QI
]
17516 = rs6000_builtin_decls
[(int) VSX_BUILTIN_STXVW4X_V16QI
];
17519 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid
,
17520 VSX_BUILTIN_VEC_LD
);
17521 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid
,
17522 VSX_BUILTIN_VEC_ST
);
17523 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid
,
17524 VSX_BUILTIN_VEC_XL
);
17525 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid
,
17526 VSX_BUILTIN_VEC_XL_BE
);
17527 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid
,
17528 VSX_BUILTIN_VEC_XST
);
17530 def_builtin ("__builtin_vec_step", int_ftype_opaque
, ALTIVEC_BUILTIN_VEC_STEP
);
17531 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque
, ALTIVEC_BUILTIN_VEC_SPLATS
);
17532 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque
, ALTIVEC_BUILTIN_VEC_PROMOTE
);
17534 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int
, ALTIVEC_BUILTIN_VEC_SLD
);
17535 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_SPLAT
);
17536 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_EXTRACT
);
17537 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int
, ALTIVEC_BUILTIN_VEC_INSERT
);
17538 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTW
);
17539 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTH
);
17540 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTB
);
17541 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTF
);
17542 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFSX
);
17543 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFUX
);
17544 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTS
);
17545 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTU
);
17547 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque
,
17548 ALTIVEC_BUILTIN_VEC_ADDE
);
17549 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque
,
17550 ALTIVEC_BUILTIN_VEC_ADDEC
);
17551 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque
,
17552 ALTIVEC_BUILTIN_VEC_CMPNE
);
17553 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque
,
17554 ALTIVEC_BUILTIN_VEC_MUL
);
17555 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque
,
17556 ALTIVEC_BUILTIN_VEC_SUBE
);
17557 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque
,
17558 ALTIVEC_BUILTIN_VEC_SUBEC
);
17560 /* Cell builtins. */
17561 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVLX
);
17562 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVLXL
);
17563 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVRX
);
17564 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVRXL
);
17566 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVLX
);
17567 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVLXL
);
17568 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVRX
);
17569 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVRXL
);
17571 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVLX
);
17572 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVLXL
);
17573 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVRX
);
17574 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVRXL
);
17576 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVLX
);
17577 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVLXL
);
17578 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVRX
);
17579 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVRXL
);
17581 if (TARGET_P9_VECTOR
)
17583 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long
,
17584 P9V_BUILTIN_STXVL
);
17585 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long
,
17586 P9V_BUILTIN_XST_LEN_R
);
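  /* As a rough illustration of the split above (an explanatory sketch, not
     part of the builtin tables): "__builtin_vec_ld" is the overloaded form
     behind vec_ld in altivec.h; the C frontend (altivec_resolve_overloaded_builtin
     in rs6000-c.c) resolves it to a type-specific builtin such as
     "__builtin_altivec_lvx_v4si" once the argument types are known.  The
     opaque_ftype_* signatures merely give the overloaded names a placeholder
     type until that resolution happens.  */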
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    {
      HOST_WIDE_INT mask = d->mask;

      /* It is expected that these dst built-in functions may have
         d->icode equal to CODE_FOR_nothing.  */
      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
                     d->name);
          continue;
        }
      def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
    }
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      machine_mode mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
                     d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        mode1 = VOIDmode;
      else
        {
          /* Cannot define builtin if the instruction is disabled.  */
          gcc_assert (d->icode != CODE_FOR_nothing);
          mode1 = insn_data[d->icode].operand[1].mode;
        }

      switch (mode1)
        {
        case VOIDmode:
          type = int_ftype_int_opaque_opaque;
          break;
        case V2DImode:
          type = int_ftype_int_v2di_v2di;
          break;
        case V4SImode:
          type = int_ftype_int_v4si_v4si;
          break;
        case V8HImode:
          type = int_ftype_int_v8hi_v8hi;
          break;
        case V16QImode:
          type = int_ftype_int_v16qi_v16qi;
          break;
        case V4SFmode:
          type = int_ftype_int_v4sf_v4sf;
          break;
        case V2DFmode:
          type = int_ftype_int_v2df_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
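  /* Worked example (illustrative; the macro names below are quoted from
     altivec.h as an assumption, not re-checked here): the int_ftype_int_*
     signatures carry an extra leading int because each AltiVec predicate
     builtin takes a CR6 selector as its first argument.  Roughly,

       vec_all_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_LT, a, b)

     where the selector picks which bit of CR6, set by the record form of
     the vector compare, is tested.  */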
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
                     d->name);
          continue;
        }

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);
      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
        {
        case V2DImode:
          type = v2di_ftype_v2di;
          break;
        case V4SImode:
          type = v4si_ftype_v4si;
          break;
        case V8HImode:
          type = v8hi_ftype_v8hi;
          break;
        case V16QImode:
          type = v16qi_ftype_v16qi;
          break;
        case V4SFmode:
          type = v4sf_ftype_v4sf;
          break;
        case V2DFmode:
          type = v2df_ftype_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
                               v16qi_ftype_long_pcvoid,
                               ALTIVEC_BUILTIN_MASK_FOR_LOAD,
                               BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl. Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
                                    integer_type_node, integer_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
               ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
                                    float_type_node, float_type_node,
                                    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
                                    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
                                    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
                                    intSI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
                                    intHI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
                                    intQI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
                                    float_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
                                    double_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
                                    intDI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
        = build_function_type_list (V1TI_type_node,
                                    long_integer_type_node, pcvoid_type_node,
                                    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
        = build_function_type_list (void_type_node,
                                    V1TI_type_node, long_integer_type_node,
                                    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
                   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
                   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
                                        NULL_TREE, NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
                                        intTI_type_node,
                                        integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
                                        integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree gpr_type_node;
      tree rettype;
      tree argtype;

      /* It is expected that these htm built-in functions may have
         d->icode equal to CODE_FOR_nothing.  */

      if (TARGET_32BIT && TARGET_POWERPC64)
        gpr_type_node = long_long_unsigned_type_node;
      else
        gpr_type_node = long_unsigned_type_node;

      if (attr & RS6000_BTC_SPR)
        {
          rettype = gpr_type_node;
          argtype = gpr_type_node;
        }
      else if (d->code == HTM_BUILTIN_TABORTDC
               || d->code == HTM_BUILTIN_TABORTDCI)
        {
          rettype = unsigned_type_node;
          argtype = gpr_type_node;
        }
      else
        {
          rettype = unsigned_type_node;
          argtype = unsigned_type_node;
        }

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (d->name == 0)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
                     (long unsigned) i);
          continue;
        }

      op[nopnds++] = (void_func) ? void_type_node : rettype;

      if (attr_args == RS6000_BTC_UNARY)
        op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }
      else if (attr_args == RS6000_BTC_TERNARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }

      switch (nopnds)
        {
        case 1:
          type = build_function_type_list (op[0], NULL_TREE);
          break;
        case 2:
          type = build_function_type_list (op[0], op[1], NULL_TREE);
          break;
        case 3:
          type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
          break;
        case 4:
          type = build_function_type_list (op[0], op[1], op[2], op[3],
                                           NULL_TREE);
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
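/* Illustrative example (the builtin name below is quoted from memory as an
   assumption): for an SPR accessor such as __builtin_get_texasr, which is
   an RS6000_BTC_SPR builtin, rettype and argtype are gpr_type_node, so a
   32-bit compile with -mpowerpc64 uses long long unsigned to cover the
   full 64-bit special purpose register.  */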
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
          && (p1->mode[1] == p2->mode[1])
          && (p1->mode[2] == p2->mode[2])
          && (p1->mode[3] == p2->mode[3])
          && (p1->uns_p[0] == p2->uns_p[0])
          && (p1->uns_p[1] == p2->uns_p[1])
          && (p1->uns_p[2] == p2->uns_p[2])
          && (p1->uns_p[3] == p2->uns_p[3]));
}
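/* Explanatory sketch: the hash treats the four (mode, uns_p) pairs as
   digits of a mixed-radix number,

     ret = ((ret * MAX_MACHINE_MODE) + mode[i]) * 2 + uns_p[i]   for i = 0..3

   so two signatures hash equal only when every mode and every signedness
   flag match, which is exactly what builtin_hasher::equal verifies.  */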
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
static tree
builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
                       machine_mode mode_arg1, machine_mode mode_arg2,
                       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
    case MISC_BUILTIN_CDTBCD:
    case MISC_BUILTIN_CBCDTD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
    case MISC_BUILTIN_ADDG6S:
    case MISC_BUILTIN_DIVWEU:
    case MISC_BUILTIN_DIVWEUO:
    case MISC_BUILTIN_DIVDEU:
    case MISC_BUILTIN_DIVDEUO:
    case VSX_BUILTIN_UDIV_V2DI:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMINUW:
    case P8V_BUILTIN_VMAXUD:
    case P8V_BUILTIN_VMINUD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDSP:
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
    case MISC_BUILTIN_UNPACK_TD:
    case MISC_BUILTIN_UNPACK_V1TI:
      h.uns_p[0] = 1;
      break;

      /* unsigned arguments for 128-bit pack instructions.  */
    case MISC_BUILTIN_PACK_TD:
    case MISC_BUILTIN_PACK_V1TI:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned second arguments (vector shift right).  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      h.uns_p[2] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error (input_location,
                 "internal error: builtin function %qs had an unexpected "
                 "return type %qs", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
        arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
        fatal_error (input_location,
                     "internal error: builtin function %qs, argument %d "
                     "had unexpected argument type %qs", name, i,
                     GET_MODE_NAME (m));
    }

  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *h2 = h;
      *found = h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
                                           arg_type[2], NULL_TREE);
    }

  return (*found)->type;
}
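/* Worked example (illustrative): ALTIVEC_BUILTIN_VMULEUB is listed above
   under "unsigned 2 argument functions", so uns_p[0..2] are all set and
   the vmuleub pattern's V8HImode result and V16QImode operands map to the
   unsigned type variants, giving the vectorizer a decl typed roughly as

     vector unsigned short (vector unsigned char, vector unsigned char)  */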
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque_opaque_opaque))
            type = opaque_ftype_opaque_opaque_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
                         (long unsigned) i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
                         d->name);
              continue;
            }

          type = builtin_function_type (insn_data[icode].operand[0].mode,
                                        insn_data[icode].operand[1].mode,
                                        insn_data[icode].operand[2].mode,
                                        insn_data[icode].operand[3].mode,
                                        d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque_opaque))
            type = opaque_ftype_opaque_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
                         (long unsigned) i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
                         d->name);
              continue;
            }

          mode0 = insn_data[icode].operand[0].mode;
          mode1 = insn_data[icode].operand[1].mode;
          mode2 = insn_data[icode].operand[2].mode;

          if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
            {
              if (! (type = v2si_ftype_v2si_qi))
                type = v2si_ftype_v2si_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              opaque_V2SI_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }
          else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
                   && mode2 == QImode)
            {
              if (! (type = v2si_ftype_int_qi))
                type = v2si_ftype_int_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              integer_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }
          else
            type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
                                          d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque))
            type = opaque_ftype_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
                         (long unsigned) i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
                         d->name);
              continue;
            }

          mode0 = insn_data[icode].operand[0].mode;
          mode1 = insn_data[icode].operand[1].mode;

          if (mode0 == V2SImode && mode1 == QImode)
            {
              if (! (type = v2si_ftype_qi))
                type = v2si_ftype_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }
          else
            type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
                                          d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple no-argument operators.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
          continue;
        }
      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (!opaque_ftype_opaque)
            opaque_ftype_opaque
              = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
          type = opaque_ftype_opaque;
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
                         (long unsigned) i);
              continue;
            }
          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr,
                         "rs6000_builtin, skip no-argument %s (no code)\n",
                         d->name);
              continue;
            }
          mode0 = insn_data[icode].operand[0].mode;
          if (mode0 == V2SImode)
            {
              /* code for paired single */
              if (! (type = v2si_ftype))
                type = v2si_ftype
                  = build_function_type_list (opaque_V2SI_type_node,
                                              NULL_TREE);
            }
          else
            type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
                                          d->code, d->name);
        }
      def_builtin (d->name, type, d->code);
    }
}
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
        {
          set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
          set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
          set_optab_libfunc (ne_optab, mode, "__gcc_qne");
          set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
          set_optab_libfunc (ge_optab, mode, "__gcc_qge");
          set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
          set_optab_libfunc (le_optab, mode, "__gcc_qle");
          set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

          set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
          set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
          set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
          set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
          set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
          set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
          set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
          set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
        }
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
        {
          set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
          set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
          set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
          set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
        }
    }
}
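/* Effect in practice (a sketch, not normative): with the default ABI an
   IBM extended-double multiply is emitted as a call to __gcc_qmul, while
   -mxl-compat (which sets TARGET_XL_COMPAT) switches the names to the
   _xlq* routines for compatibility with IBM's XL runtime.  */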
/* Set up IEEE 128-bit floating point routines.  Use different names if the
   arguments can be passed in a vector register.  The historical PowerPC
   implementation of IEEE 128-bit floating point used _q_<op> for the names, so
   continue to use that if we aren't using vector registers to pass IEEE
   128-bit floating point.  */
static void
init_float128_ieee (machine_mode mode)
{
  if (FLOAT128_VECTOR_P (mode))
    {
      set_optab_libfunc (add_optab, mode, "__addkf3");
      set_optab_libfunc (sub_optab, mode, "__subkf3");
      set_optab_libfunc (neg_optab, mode, "__negkf2");
      set_optab_libfunc (smul_optab, mode, "__mulkf3");
      set_optab_libfunc (sdiv_optab, mode, "__divkf3");
      set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
      set_optab_libfunc (abs_optab, mode, "__abstkf2");

      set_optab_libfunc (eq_optab, mode, "__eqkf2");
      set_optab_libfunc (ne_optab, mode, "__nekf2");
      set_optab_libfunc (gt_optab, mode, "__gtkf2");
      set_optab_libfunc (ge_optab, mode, "__gekf2");
      set_optab_libfunc (lt_optab, mode, "__ltkf2");
      set_optab_libfunc (le_optab, mode, "__lekf2");
      set_optab_libfunc (unord_optab, mode, "__unordkf2");

      set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
      set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
      set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
      set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");

      set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
        set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");

      set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
        set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");

      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");

      set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
      set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
      set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
      set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");

      set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
      set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
      set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
      set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");

      if (TARGET_POWERPC64)
        {
          set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
          set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
          set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
          set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
        }
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_q_add");
      set_optab_libfunc (sub_optab, mode, "_q_sub");
      set_optab_libfunc (neg_optab, mode, "_q_neg");
      set_optab_libfunc (smul_optab, mode, "_q_mul");
      set_optab_libfunc (sdiv_optab, mode, "_q_div");
      if (TARGET_PPC_GPOPT)
        set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");

      set_optab_libfunc (eq_optab, mode, "_q_feq");
      set_optab_libfunc (ne_optab, mode, "_q_fne");
      set_optab_libfunc (gt_optab, mode, "_q_fgt");
      set_optab_libfunc (ge_optab, mode, "_q_fge");
      set_optab_libfunc (lt_optab, mode, "_q_flt");
      set_optab_libfunc (le_optab, mode, "_q_fle");

      set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
    }
}
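/* Effect in practice (a sketch, not normative): with VSX, a __float128
   (KFmode) addition lowers to a call to __addkf3 and conversions use the
   kf-suffixed names above; on configurations where IEEE 128-bit values are
   not passed in vector registers the historical _q_* names are used
   instead.  */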
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
        /* AIX/Darwin/64-bit Linux quad floating point routines.  */
        init_float128_ibm (TFmode);
      else
        /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
        init_float128_ieee (TFmode);
    }
}
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */
void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
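/* The two record-form shapes emitted above look like this in RTL:

     dot == 1:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (clobber DST)])
     dot == 2:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (set DST SRC)])

   matching the "." variants of the integer instructions, which set CR0.  */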
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */
void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
               || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
              && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
              || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
              || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
              || (code != ORDERED && code != UNORDERED
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT
                  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
              || flag_finite_math_only
              || (code != LE && code != GE
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   not zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */
bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
        nb = n;
      else
        nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
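/* Worked example (illustrative): MASK = 0x1fe (binary 1'1111'1110) gives
   bit = 2, so *e = 1 (the lowest set bit) and *b = exact_log2 (0x200) - 1
   = 8 (the highest set bit).  Negative values, i.e. masks whose run of
   ones wraps around the top bit, are handled by complementing VAL first,
   in which case the stored *b can end up less than *e.  */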
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */
bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
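/* Examples (illustrative): in DImode, 0xff has ne == 0 and matches rldicl;
   0xff00000000000000 has nb == 63 and matches rldicr; 0x00ffff00 has
   nb == 23 and ne == 8, both below 32, and matches rlwinm.  A mask such as
   0x0000ffff00000000 (nb == 47, ne == 32) fits none of the three and is
   rejected.  */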
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */
const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
        return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
        return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
        return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
   rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
   shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE.  */
bool
rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);
  int sh = -1;

  if (CONST_INT_P (XEXP (shift, 1)))
    {
      sh = INTVAL (XEXP (shift, 1));
      if (sh < 0 || sh >= n)
        return false;
    }

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rld*.  */
  if (mode == DImode && code == ROTATE)
    return (nb == 63 || ne == 0 || ne == sh);

  /* SImode rotates need rlw*.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Variable shifts are only okay for rotates.  */
  if (sh < 0)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlw*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rld*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (nb == 63 || ne == 0 || ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for a shift with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */
const char *
rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
        operands[2] = GEN_INT (64 - INTVAL (operands[2]));
      operands[3] = GEN_INT (63 - nb);
      if (dot)
        return "rld%I2cl. %0,%1,%2,%3";
      return "rld%I2cl %0,%1,%2,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
        return "rld%I2cr. %0,%1,%2,%3";
      return "rld%I2cr %0,%1,%2,%3";
    }

  if (mode == DImode
      && GET_CODE (operands[4]) != LSHIFTRT
      && CONST_INT_P (operands[2])
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
        return "rld%I2c. %0,%1,%2,%3";
      return "rld%I2c %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
        operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      /* This insn can also be a 64-bit rotate with mask that really makes
         it just a shift right (with mask); the %h below are to adjust for
         that situation (shift count is >= 32 in that case).  */
      if (dot)
        return "rlw%I2nm. %0,%1,%h2,%3,%4";
      return "rlw%I2nm %0,%1,%h2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
   rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
   ASHIFT, or LSHIFTRT) in mode MODE.  */
bool
rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);

  int sh = INTVAL (XEXP (shift, 1));
  if (sh < 0 || sh >= n)
    return false;

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rldimi.  */
  if (mode == DImode && code == ROTATE)
    return (ne == sh);

  /* SImode rotates need rlwimi.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlwimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rldimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for an insert with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */
const char *
rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  /* Prefer rldimi because rlwimi is cracked.  */
  if (TARGET_POWERPC64
      && (!dot || mode == DImode)
      && GET_CODE (operands[4]) != LSHIFTRT
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
        return "rldimi. %0,%1,%2,%3";
      return "rldimi %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
        operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
        return "rlwimi. %0,%1,%2,%3,%4";
      return "rlwimi %0,%1,%2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */
bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
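/* Worked example (illustrative): VAL = 0xf0f.  Then bit1 = 1 (the lowest
   set bit), bit2 = 0x10 (the bottom of the lowest hole), val1 = 0xf00, and
   bit3 = 0x100 (just above that hole), so VAL + bit3 - bit2 = 0xfff:
   filling the hole yields a single stretch of ones, hence 0xf0f is a valid
   two-insn AND mask.  */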
/* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
   If EXPAND is true, split rotate-and-mask instructions we generate to
   their constituent parts as well (this is used during expand); if DOT
   is 1, make the last insn a record-form instruction clobbering the
   destination GPR and setting the CC reg (from operands[3]); if 2, set
   that GPR as well as the CC reg.  */
void
rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
{
  gcc_assert (!(expand && dot));

  unsigned HOST_WIDE_INT val = INTVAL (operands[2]);

  /* If it is one stretch of ones, it is DImode; shift left, mask, then
     shift right.  This generates better code than doing the masks without
     shifts, or shifting first right and then left.  */
  int nb, ne;
  if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
    {
      gcc_assert (mode == DImode);

      int shift = 63 - nb;
      if (expand)
        {
          rtx tmp1 = gen_reg_rtx (DImode);
          rtx tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
          emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
          emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
        }
      else
        {
          rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
          tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
          emit_move_insn (operands[0], tmp);
          tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
          rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
        }
      return;
    }

  /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
     that does the rest.  */
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;

  unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
  unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;

  gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));

  /* Two "no-rotate"-and-mask instructions, for SImode.  */
  if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
    {
      gcc_assert (mode == SImode);

      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
      emit_move_insn (reg, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  gcc_assert (mode == DImode);

  /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
     insns; we have to do the first in SImode, because it wraps.  */
  if (mask2 <= 0xffffffff
      && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
    {
      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
                             GEN_INT (mask1));
      rtx reg_low = gen_lowpart (SImode, reg);
      emit_move_insn (reg_low, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  /* Two rld* insns: rotate, clear the hole in the middle (which now is
     at the top end), rotate back and clear the other hole.  */
  int right = exact_log2 (bit3);
  int left = 64 - right;

  /* Rotate the mask too.  */
  mask1 = (mask1 >> right) | ((bit2 - 1) << left);

  if (expand)
    {
      rtx tmp1 = gen_reg_rtx (DImode);
      rtx tmp2 = gen_reg_rtx (DImode);
      rtx tmp3 = gen_reg_rtx (DImode);
      emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
      emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
      emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
      emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
    }
  else
    {
      rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
      emit_move_insn (operands[0], tmp);
      tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
    }
}
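/* Sketch of the last strategy above (instruction spellings illustrative):
   for a DImode mask whose hole no rlwinm can reach, the sequence is,
   roughly,

     rotldi  dst,src,LEFT      # bring the hole to the top
     AND with the rotated MASK1
     rotldi  dst,dst,RIGHT     # rotate back
     AND with MASK2

   where each ROTATE+AND pair is later matched as a single rld[i]cl or
   rld[i]cr instruction.  */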
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
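/* For example, hard registers fr4 and fr5 pass the checks above
   (REGNO (fr4) == REGNO (fr5) - 1), so a single lfq can load the pair.  */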
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
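/* For example, the address pair 0(r3) and 8(r3) -- or (reg r3) and
   8(r3) -- passes the checks above: same base register, and the second
   offset is the first plus 8.  */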
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
static machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;
  return mode;
}
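/* Note: SDmode is a 4-byte value, but there is no 4-byte FPR load/store
   that preserves the decimal-float layout, so the secondary-memory copy
   above is done in the 8-byte DDmode instead.  */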
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
	return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
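/* The value returned above approximates the number of extra instructions
   needed to form the TOC address: 1 or 2 for the medium/large code models,
   and for the small code model 6 under -mminimal-toc, otherwise 3.  */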
/* Helper function for rs6000_secondary_reload to determine whether the memory
   address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the memory
   helper functions and to try a different reload method, 0 if no additional
   instructions are needed, and positive to give the extra cost for the
   memory.  */

static int
rs6000_secondary_reload_memory (rtx addr,
				enum reg_class rclass,
				machine_mode mode)
{
  int extra_cost = 0;
  rtx reg, and_arg, plus_arg0, plus_arg1;
  addr_mask_type addr_mask;
  const char *type = NULL;
  const char *fail_msg = NULL;

  if (GPR_REG_CLASS_P (rclass))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (rclass == FLOAT_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (rclass == ALTIVEC_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  /* For the combined VSX_REGS, turn off Altivec AND -16.  */
  else if (rclass == VSX_REGS)
    addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
		 & ~RELOAD_REG_AND_M16);

  /* If the register allocator hasn't made up its mind yet on the register
     class to use, settle on defaults to use.  */
  else if (rclass == NO_REGS)
    {
      addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
		   & ~RELOAD_REG_AND_M16);

      if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
	addr_mask &= ~(RELOAD_REG_INDEXED
		       | RELOAD_REG_PRE_INCDEC
		       | RELOAD_REG_PRE_MODIFY);
    }

  else
    addr_mask = 0;

  /* If the register isn't valid in this register class, just return now.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr,
		   "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		   "not valid in class\n",
		   GET_MODE_NAME (mode), reg_class_names[rclass]);
	  debug_rtx (addr);
	}

      return -1;
    }

  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  We
	 don't need a scratch register, since the powerpc only supports
	 PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      reg = XEXP (addr, 0);
      if (!base_reg_operand (addr, GET_MODE (reg)))
	{
	  fail_msg = "no base register #1";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

    case PRE_MODIFY:
      reg = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);
      if (!base_reg_operand (reg, GET_MODE (reg))
	  || GET_CODE (plus_arg1) != PLUS
	  || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
	{
	  fail_msg = "bad PRE_MODIFY";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  Only allow the AND for vector sizes.  */
    case AND:
      and_arg = XEXP (addr, 0);
      if (GET_MODE_SIZE (mode) != 16
	  || GET_CODE (XEXP (addr, 1)) != CONST_INT
	  || INTVAL (XEXP (addr, 1)) != -16)
	{
	  fail_msg = "bad Altivec AND #1";
	  extra_cost = -1;
	}

      if (rclass != ALTIVEC_REGS)
	{
	  if (legitimate_indirect_address_p (and_arg, false))
	    extra_cost = 1;

	  else if (legitimate_indexed_address_p (and_arg, false))
	    extra_cost = 2;

	  else
	    {
	      fail_msg = "bad Altivec AND #2";
	      extra_cost = -1;
	    }

	  type = "and";
	}
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!legitimate_indirect_address_p (addr, false))
	{
	  extra_cost = 1;
	  type = "move";
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      plus_arg0 = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);

      /* (plus (plus (reg) (constant)) (constant)) is generated during
	 push_reload processing, so handle it now.  */
      if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset";
	    }
	}

      /* (plus (plus (reg) (constant)) (reg)) is also generated during
	 push_reload processing, so handle it now.  */
      else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      extra_cost = 1;
	      type = "indexed #2";
	    }
	}

      else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
	{
	  fail_msg = "no base register #2";
	  extra_cost = -1;
	}

      else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0
	      || !legitimate_indexed_address_p (addr, false))
	    {
	      extra_cost = 1;
	      type = "indexed";
	    }
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
	       && CONST_INT_P (plus_arg1))
	{
	  if (!quad_address_offset_p (INTVAL (plus_arg1)))
	    {
	      extra_cost = 1;
	      type = "vector d-form offset";
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset #2";
	    }
	}

      else
	{
	  fail_msg = "bad PLUS";
	  extra_cost = -1;
	}

      break;

    case LO_SUM:
      /* Quad offsets are restricted and can't handle normal addresses.  */
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum";
	}

      else if (!legitimate_lo_sum_address_p (mode, addr, false))
	{
	  fail_msg = "bad LO_SUM";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "lo_sum";
	}
      break;

      /* Static addresses need to create a TOC entry.  */
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #2";
	}

      else
	{
	  type = "address";
	  extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
	}
      break;

      /* TOC references look like offsetable memory.  */
    case UNSPEC:
      if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
	{
	  fail_msg = "bad UNSPEC";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #3";
	}

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "toc reference";
	}
      break;

    default:
      {
	fail_msg = "bad address";
	extra_cost = -1;
      }
    }

  if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
    {
      if (extra_cost < 0)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory error: mode = %s, "
		 "class = %s, addr_mask = '%s', %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 (fail_msg != NULL) ? fail_msg : "<bad address>");

      else
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "addr_mask = '%s', extra cost = %d, %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 extra_cost,
		 (type) ? type : "<none>");

      debug_rtx (addr);
    }

  return extra_cost;
}
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode)
{
  int size = GET_MODE_SIZE (mode);

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Originally small integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  TDmode values are disallowed since they
     need special direct move handling, which we do not support yet.  */
  if (TARGET_DIRECT_MOVE
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	  || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    {
      if (TARGET_POWERPC64)
	{
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
	  if (size == 8)
	    return true;

	  /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
	  if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (TARGET_P8_VECTOR)
	{
	  if (mode == SImode)
	    return true;

	  if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (mode == SDmode)
	return true;
    }

  /* Power6+: MFTGPR or MFFGPR.  */
  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
	   && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
	       || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  /* Move to/from SPR.  */
  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
	   && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
	       || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
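/* For example, a DImode move between a GPR and a VSX register on a 64-bit
   ISA 2.07 target is "simple" (a single mtvsrd or mfvsrd), so no scratch
   register or helper pattern is needed.  */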
/* Direct move helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode,
				     secondary_reload_info *sri,
				     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX registers on
	 ISA 2.07 (power8, power9) when running in 64-bit mode using
	 XXPERMDI to glue the two 64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 3;			/* 2 mtvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}

      /* Handle moving 128-bit values from VSX registers to GPRs on
	 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to
	 the bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* 2 mfvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}
    }

  else if (TARGET_POWERPC64 && mode == SFmode)
    {
      if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* xscvdpspn, mfvsrd, and.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}

      else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 2;			/* mtvsrz, xscvspdpn.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
	 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
	 32-bit values back together.  Altivec register classes must be handled
	 specially since a different instruction is used, and the secondary
	 reload support requires a single instruction class in the scratch
	 register constraint.  However, right now TFmode is not allowed in
	 Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
	{
	  cost = 3;			/* 2 mtvsrwz's, 1 fmrgow.  */
	  icode = reg_addr[mode].reload_fpr_gpr;
	}
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      if (sri)
	{
	  sri->icode = icode;
	  sri->extra_cost = cost;
	}
    }

  return ret;
}
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
			      enum rs6000_reg_type from_type,
			      machine_mode mode,
			      secondary_reload_info *sri,
			      bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers are simple moves for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
	{
	  sri->icode = CODE_FOR_nothing;
	  sri->extra_cost = 0;
	}
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
					      altivec_p);
}
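/* Note: this function is called both from rs6000_secondary_reload with a
   real SRI to fill in, and from rs6000_secondary_memory_needed with
   SRI == 0, where only the boolean result matters.  */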
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;
  bool done_p = false;

  /* Allow subreg of memory before/during reload.  */
  bool memory_p = (MEM_P (x)
		   || (!reload_completed && GET_CODE (x) == SUBREG
		       && MEM_P (SUBREG_REG (x))));

  sri->icode = CODE_FOR_nothing;
  sri->t_icode = CODE_FOR_nothing;
  sri->extra_cost = 0;
  icode = ((in_p)
	   ? reg_addr[mode].reload_load
	   : reg_addr[mode].reload_store);

  if (REG_P (x) || register_operand (x, mode))
    {
      enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
      bool altivec_p = (rclass == ALTIVEC_REGS);
      enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);

      if (!in_p)
	std::swap (to_type, from_type);

      /* Can we do a direct move of some sort?  */
      if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
					altivec_p))
	{
	  icode = (enum insn_code)sri->icode;
	  default_p = false;
	  done_p = true;
	  ret = NO_REGS;
	}
    }

  /* Make sure 0.0 is not reloaded or forced into memory.  */
  if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    {
      ret = NO_REGS;
      default_p = false;
      done_p = true;
    }

  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a move via a traditional floating
     point register, unless we have D-form addressing.  Also make sure that
     non-zero constants use a FPR.  */
  if (!done_p && reg_addr[mode].scalar_in_vmx_p
      && !mode_supports_vmx_dform (mode)
      && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
      && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
    {
      ret = FLOAT_REGS;
      default_p = false;
      done_p = true;
    }

  /* Handle reload of load/stores if we have reload helper functions.  */
  if (!done_p && icode != CODE_FOR_nothing && memory_p)
    {
      int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
						       mode);

      if (extra_cost >= 0)
	{
	  done_p = true;
	  ret = NO_REGS;
	  if (extra_cost > 0)
	    {
	      sri->extra_cost = extra_cost;
	      sri->icode = icode;
	    }
	}
    }

  /* Handle unaligned loads and stores of integer registers.  */
  if (!done_p && TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and the offset is not a multiple of
	     four or we have an address wrap.  Address wrap will only
	     occur for LO_SUMs since legitimate_offset_address_p
	     rejects addresses for 16-byte mems that will wrap.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? (1 /* legitimate_address_p allows any offset for lo_sum */
		 && ((offset & 3) != 0
		     || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
	      : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
		 && (offset & 3) != 0))
	    {
	      /* -m32 -mpowerpc64 needs to use a 32-bit scratch register.  */
	      if (in_p)
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
			      : CODE_FOR_reload_di_load);
	      else
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
			      : CODE_FOR_reload_di_store);
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p && !TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and we have a wrap.

	     legitimate_lo_sum_address_p allows LO_SUM addresses to
	     have any offset so test for wrap in the low 16 bits.

	     legitimate_offset_address_p checks for the range
	     [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
	     for mode size of 16.  We wrap at [0x7ffc,0x7fff] and
	     [0x7ff4,0x7fff] respectively, so test for the
	     intersection of these ranges, [0x7ffc,0x7fff] and
	     [0x7ff4,0x7ff7] respectively.

	     Note that the address we see here may have been
	     manipulated by legitimize_reload_address.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
	      : offset - (0x8000 - extra) < UNITS_PER_WORD)
	    {
	      if (in_p)
		sri->icode = CODE_FOR_reload_si_load;
	      else
		sri->icode = CODE_FOR_reload_si_store;
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p)
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (reload_completed)
	fputs (", after reload", stderr);

      if (!done_p)
	fputs (", done_p not set", stderr);

      if (default_p)
	fputs (", default secondary reload", stderr);

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d",
		 insn_data[sri->icode].name, sri->extra_cost);

      else if (sri->extra_cost > 0)
	fprintf (stderr, ", extra cost = %d", sri->extra_cost);

      fputs ("\n", stderr);
      debug_rtx (x);
    }

  return ret;
}
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
			       bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
	   store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (mem, reg);
  else
    set = gen_rtx_SET (reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
  ATTRIBUTE_NORETURN;

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
			      bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
/* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
   reload helper functions.  These were identified in
   rs6000_secondary_reload_memory, and if reload decided to use the secondary
   reload, it calls the insns:
	reload_<RELOAD:mode>_<P:mptrsize>_store
	reload_<RELOAD:mode>_<P:mptrsize>_load

   which in turn calls this function, to do whatever is necessary to create
   valid addresses.  */

void
rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  machine_mode mode = GET_MODE (reg);
  addr_mask_type addr_mask;
  rtx addr;
  rtx new_addr;
  rtx op_reg, op0, op1;
  rtx and_op;
  rtx cc_clobber;
  rtvec rv;

  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
      || !base_reg_operand (scratch, GET_MODE (scratch)))
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  else
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  /* Make sure the mode is valid in this register class.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (TARGET_DEBUG_ADDR)
    rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);

  new_addr = addr = XEXP (mem, 0);
  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  If
	 not, do the update now.  We don't need a scratch register, since the
	 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      op_reg = XEXP (addr, 0);
      if (!base_reg_operand (op_reg, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
	  new_addr = op_reg;
	}
      break;

    case PRE_MODIFY:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode)
	  || GET_CODE (op1) != PLUS
	  || !rtx_equal_p (op0, XEXP (op1, 0)))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  emit_insn (gen_rtx_SET (op0, op1));
	  new_addr = op0;
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  */
    case AND:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if ((addr_mask & RELOAD_REG_AND_M16) == 0)
	{
	  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
	    op_reg = op0;

	  else if (GET_CODE (op1) == PLUS)
	    {
	      emit_insn (gen_rtx_SET (scratch, op1));
	      op_reg = scratch;
	    }

	  else
	    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

	  and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
	  cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
	  rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
	  emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
	  new_addr = scratch;
	}
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!base_reg_operand (addr, GET_MODE (addr)))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
	{
	  if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
	      || !quad_address_p (addr, mode, false))
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case LO_SUM:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Quad offsets are restricted and can't handle normal addresses.  */
      else if (mode_supports_vsx_dform_quad (mode))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (legitimate_lo_sum_address_p (mode, addr, false))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      rs6000_emit_move (scratch, addr, Pmode);
      new_addr = scratch;
      break;

    default:
      rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
    }

  /* Adjust the address if it changed.  */
  if (addr != new_addr)
    {
      mem = replace_equiv_address_nv (mem, new_addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
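/* The reload_* patterns guarantee that SCRATCH is a base register here, so
   any address the register class cannot use directly can always be rewritten
   as (mem (reg scratch)) after the one or two scratch insns emitted above.  */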
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      gcc_assert (REG_P (XEXP (addr, 0))
		  && GET_CODE (XEXP (addr, 1)) == PLUS
		  && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
      scratch_or_premodify = XEXP (addr, 0);
      if (!HARD_REGISTER_P (scratch_or_premodify))
	/* If we have a pseudo here then reload will have arranged
	   to have it replaced, but only in the original insn.
	   Use the replacement here too.  */
	scratch_or_premodify = find_replacement (&XEXP (addr, 0));

      /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
	 expressions from the original insn, without unsharing them.
	 Any RTL that points into the original insn will of course
	 have register replacements applied.  That is why we don't
	 need to look for replacements under the PLUS.  */
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  machine_mode mode = GET_MODE (x);
  bool is_constant = CONSTANT_P (x);

  /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
     reload class for it.  */
  if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS.  Do not allow
     the reloading of address expressions using PLUS into floating point
     registers.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
    {
      if (is_constant)
	{
	  /* Zero is always allowed in all VSX registers.  */
	  if (x == CONST0_RTX (mode))
	    return rclass;

	  /* If this is a vector constant that can be formed with a few Altivec
	     instructions, we want altivec registers.  */
	  if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
	    return ALTIVEC_REGS;

	  /* If this is an integer constant that can easily be loaded into
	     vector registers, allow it.  */
	  if (CONST_INT_P (x))
	    {
	      HOST_WIDE_INT value = INTVAL (x);

	      /* ISA 2.07 can generate -1 in all registers with XXLORC.  ISA
		 2.06 can generate it in the Altivec registers with
		 VSPLTI<x>.  */
	      if (value == -1)
		{
		  if (TARGET_P8_VECTOR)
		    return rclass;
		  else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
		    return ALTIVEC_REGS;
		}

	      /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
		 a sign extend in the Altivec registers.  */
	      if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
		  && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
		return ALTIVEC_REGS;
	    }

	  /* Force constant to memory.  */
	  return NO_REGS;
	}

      /* D-form addressing can easily reload the value.  */
      if (mode_supports_vmx_dform (mode)
	  || mode_supports_vsx_dform_quad (mode))
	return rclass;

      /* If this is a scalar floating point value and we don't have D-form
	 addressing, prefer the traditional floating point registers so that we
	 can use D-form (register+offset) addressing.  */
      if (rclass == VSX_REGS
	  && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
	return FLOAT_REGS;

      /* Prefer the Altivec registers if Altivec is handling the vector
	 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
	 loads.  */
      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
	  || mode == V1TImode)
	return ALTIVEC_REGS;

      return rclass;
    }

  if (is_constant || GET_CODE (x) == PLUS)
    {
      if (reg_class_subset_p (GENERAL_REGS, rclass))
	return GENERAL_REGS;
      if (reg_class_subset_p (BASE_REGS, rclass))
	return BASE_REGS;
      return NO_REGS;
    }

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  return rclass;
}
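/* For example, reloading (const_int -1) into a VSX register keeps the class
   unchanged on ISA 2.07 (XXLORC forms -1 in any VSX register) but narrows to
   ALTIVEC_REGS on ISA 2.06, while most other scalar constants come back as
   NO_REGS and are therefore forced to memory.  */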
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (machine_mode mode,
				reg_class_t from_class,
				reg_class_t to_class)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
		    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory.  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
				    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}
/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (machine_mode mode,
				      reg_class_t from_class,
				      reg_class_t to_class)
{
  bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
	   "to_class = %s, mode = %s\n",
	   ret ? "true" : "false",
	   reg_class_names[from_class],
	   reg_class_names[to_class],
	   GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* If we have VSX register moves, prefer moving scalar values between
     Altivec registers and GPR by going via an FPR (and then via memory)
     instead of reloading the secondary memory address for Altivec moves.  */
  if (TARGET_VSX
      && GET_MODE_SIZE (mode) < 16
      && !mode_supports_vmx_dform (mode)
      && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
	   && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
	  || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
	      && (regno >= 0 && INT_REGNO_P (regno)))))
    return FLOAT_REGS;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and VSX registers can go into VSX registers (both the
     traditional floating point and the altivec registers).  */
  if (rclass == VSX_REGS
      && (regno == -1 || VSX_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
rs6000_can_change_mode_class (machine_mode from,
			      machine_mode to,
			      reg_class_t rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
	{
	  unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
	  unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
	  bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
	  bool from_float128_vector_p = FLOAT128_VECTOR_P (from);

	  /* Don't allow 64-bit types to overlap with 128-bit types that take a
	     single register under VSX because the scalar part of the register
	     is in the upper 64-bits, and not the lower 64-bits.  Types like
	     TFmode/TDmode that take 2 scalar register can overlap.  128-bit
	     IEEE floating point can't overlap, and neither can small
	     values.  */

	  if (to_float128_vector_p && from_float128_vector_p)
	    return true;

	  else if (to_float128_vector_p || from_float128_vector_p)
	    return false;

	  /* TDmode in floating-mode registers must always go into a register
	     pair with the most significant word in the even-numbered register
	     to match ISA requirements.  In little-endian mode, this does not
	     match subreg numbering, so we cannot allow subregs.  */
	  if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
	    return false;

	  if (from_size < 8 || to_size < 8)
	    return false;

	  if (from_size == 8 && (8 * to_nregs) != to_size)
	    return false;

	  if (to_size == 8 && (8 * from_nregs) != from_size)
	    return false;

	  return true;
	}
      else
	return true;
    }

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
	  || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
	return false;

      return (from_size == 8 || from_size == 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return false;

  return true;
}
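/* For example, with -mvsx a (subreg:V2DI (reg:V4SI)) is allowed (both modes
   are 16 bytes in one register), as are DImode/DFmode subregs of each other
   (both 8 bytes), but an 8-byte mode may not overlap a 16-byte mode in a VSX
   register because the scalar lives in the upper 64 bits.  */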
/* Debug version of rs6000_can_change_mode_class.  */
static bool
rs6000_debug_can_change_mode_class (machine_mode from,
				    machine_mode to,
				    reg_class_t rclass)
{
  bool ret = rs6000_can_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_can_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
/* Return a string to do a move operation of 128 bits of data.  */

const char *
rs6000_output_move_128bit (rtx operands[])
{
  rtx dest = operands[0];
  rtx src = operands[1];
  machine_mode mode = GET_MODE (dest);
  int dest_regno;
  int src_regno;
  bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
  bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;

  if (REG_P (dest))
    {
      dest_regno = REGNO (dest);
      dest_gpr_p = INT_REGNO_P (dest_regno);
      dest_fp_p = FP_REGNO_P (dest_regno);
      dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
      dest_vsx_p = dest_fp_p | dest_vmx_p;
    }
  else
    {
      dest_regno = -1;
      dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
    }

  if (REG_P (src))
    {
      src_regno = REGNO (src);
      src_gpr_p = INT_REGNO_P (src_regno);
      src_fp_p = FP_REGNO_P (src_regno);
      src_vmx_p = ALTIVEC_REGNO_P (src_regno);
      src_vsx_p = src_fp_p | src_vmx_p;
    }
  else
    {
      src_regno = -1;
      src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
    }

  /* Register moves.  */
  if (dest_regno >= 0 && src_regno >= 0)
    {
      if (dest_gpr_p)
	{
	  if (src_gpr_p)
	    return "#";

	  if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
	    return (WORDS_BIG_ENDIAN
		    ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
		    : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");

	  else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
	    return "#";
	}

      else if (TARGET_VSX && dest_vsx_p)
	{
	  if (src_vsx_p)
	    return "xxlor %x0,%x1,%x1";

	  else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
	    return (WORDS_BIG_ENDIAN
		    ? "mtvsrdd %x0,%1,%L1"
		    : "mtvsrdd %x0,%L1,%1");

	  else if (TARGET_DIRECT_MOVE && src_gpr_p)
	    return "#";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
	return "vor %0,%1,%1";

      else if (dest_fp_p && src_fp_p)
	return "#";
    }

  /* Loads.  */
  else if (dest_regno >= 0 && MEM_P (src))
    {
      if (dest_gpr_p)
	{
	  if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
	    return "lq %0,%1";
	  else
	    return "#";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p
	       && altivec_indexed_or_indirect_operand (src, mode))
	return "lvx %0,%y1";

      else if (TARGET_VSX && dest_vsx_p)
	{
	  if (mode_supports_vsx_dform_quad (mode)
	      && quad_address_p (XEXP (src, 0), mode, true))
	    return "lxv %x0,%1";

	  else if (TARGET_P9_VECTOR)
	    return "lxvx %x0,%y1";

	  else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	    return "lxvw4x %x0,%y1";

	  else
	    return "lxvd2x %x0,%y1";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p)
	return "lvx %0,%y1";

      else if (dest_fp_p)
	return "#";
    }

  /* Stores.  */
  else if (src_regno >= 0 && MEM_P (dest))
    {
      if (src_gpr_p)
	{
	  if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
	    return "stq %1,%0";
	  else
	    return "#";
	}

      else if (TARGET_ALTIVEC && src_vmx_p
	       && altivec_indexed_or_indirect_operand (src, mode))
	return "stvx %1,%y0";

      else if (TARGET_VSX && src_vsx_p)
	{
	  if (mode_supports_vsx_dform_quad (mode)
	      && quad_address_p (XEXP (dest, 0), mode, true))
	    return "stxv %x1,%0";

	  else if (TARGET_P9_VECTOR)
	    return "stxvx %x1,%y0";

	  else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	    return "stxvw4x %x1,%y0";

	  else
	    return "stxvd2x %x1,%y0";
	}

      else if (TARGET_ALTIVEC && src_vmx_p)
	return "stvx %1,%y0";

      else if (src_fp_p)
	return "#";
    }

  /* Constants.  */
  else if (dest_regno >= 0
	   && (GET_CODE (src) == CONST_INT
	       || GET_CODE (src) == CONST_WIDE_INT
	       || GET_CODE (src) == CONST_DOUBLE
	       || GET_CODE (src) == CONST_VECTOR))
    {
      if (dest_gpr_p)
	return "#";

      else if ((dest_vmx_p && TARGET_ALTIVEC)
	       || (dest_vsx_p && TARGET_VSX))
	return output_vec_const_move (operands);
    }

  fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
}
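/* For example, a V2DImode copy between two VSX registers comes out as
   "xxlor %x0,%x1,%x1", a vector load on power9 as "lxvx", and a GPR to GPR
   move returns "#" so that the insn is split into word-sized moves later.  */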
/* Validate a 128-bit move.  */
bool
rs6000_move_128bit_ok_p (rtx operands[])
{
  machine_mode mode = GET_MODE (operands[0]);
  return (gpc_reg_operand (operands[0], mode)
	  || gpc_reg_operand (operands[1], mode));
}

/* Return true if a 128-bit move needs to be split.  */
bool
rs6000_split_128bit_ok_p (rtx operands[])
{
  if (!reload_completed)
    return false;

  if (!gpr_or_gpr_p (operands[0], operands[1]))
    return false;

  if (quad_load_store_p (operands[0], operands[1]))
    return false;

  return true;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
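/* For example, for (eq (reg:CCFP cr2) (const_int 0)) the base_bit is
   4 * 2 = 8 and the EQ bit is base_bit + 2 = 10, matching the big-endian
   bit numbering of the 32-bit condition register.  */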
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}

#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_ELFv2:
	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
21127 /* Print an operand. Recognize special options, documented below. */
21130 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21131 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21133 #define SMALL_DATA_RELOC "sda21"
21134 #define SMALL_DATA_REG 0
21138 print_operand (FILE *file
, rtx x
, int code
)
21141 unsigned HOST_WIDE_INT uval
;
21145 /* %a is output_address. */
21147 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21151 /* Like 'J' but get to the GT bit only. */
21152 gcc_assert (REG_P (x
));
21154 /* Bit 1 is GT bit. */
21155 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
21157 /* Add one for shift count in rlinm for scc. */
21158 fprintf (file
, "%d", i
+ 1);
21162 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21165 output_operand_lossage ("invalid %%e value");
21170 if ((uval
& 0xffff) == 0 && uval
!= 0)
21175 /* X is a CR register. Print the number of the EQ bit of the CR */
21176 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21177 output_operand_lossage ("invalid %%E value");
21179 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
21183 /* X is a CR register. Print the shift count needed to move it
21184 to the high-order four bits. */
21185 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21186 output_operand_lossage ("invalid %%f value");
21188 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
21192 /* Similar, but print the count for the rotate in the opposite
21194 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21195 output_operand_lossage ("invalid %%F value");
21197 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
21201 /* X is a constant integer. If it is negative, print "m",
21202 otherwise print "z". This is to make an aze or ame insn. */
21203 if (GET_CODE (x
) != CONST_INT
)
21204 output_operand_lossage ("invalid %%G value");
21205 else if (INTVAL (x
) >= 0)
21212 /* If constant, output low-order five bits. Otherwise, write
21215 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
21217 print_operand (file
, x
, 0);
21221 /* If constant, output low-order six bits. Otherwise, write
21224 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
21226 print_operand (file
, x
, 0);
21230 /* Print `i' if this is a constant, else nothing. */
21236 /* Write the bit number in CCR for jump. */
21237 i
= ccr_bit (x
, 0);
21239 output_operand_lossage ("invalid %%j code");
21241 fprintf (file
, "%d", i
);
21245 /* Similar, but add one for shift count in rlinm for scc and pass
21246 scc flag to `ccr_bit'. */
21247 i
= ccr_bit (x
, 1);
21249 output_operand_lossage ("invalid %%J code");
21251 /* If we want bit 31, write a shift count of zero, not 32. */
21252 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21256 /* X must be a constant. Write the 1's complement of the
21259 output_operand_lossage ("invalid %%k value");
21261 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
21265 /* X must be a symbolic constant on ELF. Write an
21266 expression suitable for an 'addi' that adds in the low 16
21267 bits of the MEM. */
21268 if (GET_CODE (x
) == CONST
)
21270 if (GET_CODE (XEXP (x
, 0)) != PLUS
21271 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
21272 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
21273 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
21274 output_operand_lossage ("invalid %%K value");
21276 print_operand_address (file
, x
);
21277 fputs ("@l", file
);
21280 /* %l is output_asm_label. */
21283 /* Write second word of DImode or DFmode reference. Works on register
21284 or non-indexed memory only. */
21286 fputs (reg_names
[REGNO (x
) + 1], file
);
21287 else if (MEM_P (x
))
21289 machine_mode mode
= GET_MODE (x
);
21290 /* Handle possible auto-increment. Since it is pre-increment and
21291 we have already done it, we can just use an offset of word. */
21292 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21293 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21294 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21296 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21297 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21300 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
,
21304 if (small_data_operand (x
, GET_MODE (x
)))
21305 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21306 reg_names
[SMALL_DATA_REG
]);
21311 /* Write the number of elements in the vector times 4. */
21312 if (GET_CODE (x
) != PARALLEL
)
21313 output_operand_lossage ("invalid %%N value");
21315 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
21319 /* Similar, but subtract 1 first. */
21320 if (GET_CODE (x
) != PARALLEL
)
21321 output_operand_lossage ("invalid %%O value");
21323 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
21327 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21330 || (i
= exact_log2 (INTVAL (x
))) < 0)
21331 output_operand_lossage ("invalid %%p value");
21333 fprintf (file
, "%d", i
);
21337 /* The operand must be an indirect memory reference. The result
21338 is the register name. */
21339 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
21340 || REGNO (XEXP (x
, 0)) >= 32)
21341 output_operand_lossage ("invalid %%P value");
21343 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
21347 /* This outputs the logical code corresponding to a boolean
21348 expression. The expression may have one or both operands
21349 negated (if one, only the first one). For condition register
21350 logical operations, it will also treat the negated
21351 CR codes as NOTs, but not handle NOTs of them. */
21353 const char *const *t
= 0;
21355 enum rtx_code code
= GET_CODE (x
);
21356 static const char * const tbl
[3][3] = {
21357 { "and", "andc", "nor" },
21358 { "or", "orc", "nand" },
21359 { "xor", "eqv", "xor" } };
21363 else if (code
== IOR
)
21365 else if (code
== XOR
)
21368 output_operand_lossage ("invalid %%q value");
21370 if (GET_CODE (XEXP (x
, 0)) != NOT
)
21374 if (GET_CODE (XEXP (x
, 1)) == NOT
)
21385 if (! TARGET_MFCRF
)
21391 /* X is a CR register. Print the mask for `mtcrf'. */
21392 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21393 output_operand_lossage ("invalid %%R value");
21395 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
21399 /* Low 5 bits of 32 - value */
21401 output_operand_lossage ("invalid %%s value");
21403 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
21407 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21408 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
21410 /* Bit 3 is OV bit. */
21411 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
21413 /* If we want bit 31, write a shift count of zero, not 32. */
21414 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21418 /* Print the symbolic name of a branch target register. */
21419 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
21420 && REGNO (x
) != CTR_REGNO
))
21421 output_operand_lossage ("invalid %%T value");
21422 else if (REGNO (x
) == LR_REGNO
)
21423 fputs ("lr", file
);
21425 fputs ("ctr", file
);
21429 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21430 for use in unsigned operand. */
21433 output_operand_lossage ("invalid %%u value");
21438 if ((uval
& 0xffff) == 0)
21441 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, uval
& 0xffff);
21445 /* High-order 16 bits of constant for use in signed operand. */
21447 output_operand_lossage ("invalid %%v value");
21449 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
21450 (INTVAL (x
) >> 16) & 0xffff);
21454 /* Print `u' if this has an auto-increment or auto-decrement. */
21456 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
21457 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
21458 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
21463 /* Print the trap code for this operand. */
21464 switch (GET_CODE (x
))
21467 fputs ("eq", file
); /* 4 */
21470 fputs ("ne", file
); /* 24 */
21473 fputs ("lt", file
); /* 16 */
21476 fputs ("le", file
); /* 20 */
21479 fputs ("gt", file
); /* 8 */
21482 fputs ("ge", file
); /* 12 */
21485 fputs ("llt", file
); /* 2 */
21488 fputs ("lle", file
); /* 6 */
21491 fputs ("lgt", file
); /* 1 */
21494 fputs ("lge", file
); /* 5 */
21497 gcc_unreachable ();
    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;
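      /* Worked example of the XOR/subtract sign extension used above: for
	 INTVAL (x) == 0x12348765, the low half 0x8765 becomes
	 (0x8765 ^ 0x8000) - 0x8000 == -30875, i.e. the low 16 bits
	 reinterpreted as a signed value.  */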
    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;
    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode/PTImode  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode/PTImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;
    case 'y':
      /* Print AltiVec memory operand.  */
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage
		  ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }
    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
	  else
	    output_address (GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				   &tocrel_offset_oac))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      if (const char *name = get_some_local_dynamic_name ())
	assemble_name (file, name);
      else
	output_operand_lossage ("'%%&' used without any "
				"local dynamic TLS references");
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				&tocrel_offset_oac))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)",
		 reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset_oac) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	    need_toc_init = 1;
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the
	 text section.  */
      if (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && in_section != toc_section
	  && !recurse
	  && !CONST_SCALAR_INT_P (x)
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
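/* For reference, the fixup path above emits assembly of roughly this shape
   (label name illustrative):

	.LCP0:
		.long	(expr)@fixup
		.section	".fixup","aw"
		.align	2
		.long	.LCP0
		.previous

   so the loader can find and relocate the stored address at run time.  */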
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  if (TARGET_XCOFF)
    return;

  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "protected", "hidden", "internal"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
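/* Example: reversing LT in CCFPmode yields UNGE, since !(a < b) must also
   hold when the operands compare unordered.  Under -ffinite-math-only the
   unordered outcome cannot happen for the ordered codes, which is why the
   flag_finite_math_only test above lets plain reverse_condition be used
   for them.  */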
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, machine_mode mode)
{
  machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    comp_mode = CCmode;
  else if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }
  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* IEEE 128-bit support in VSX registers when we do not have hardware
     support.  */
  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    {
      rtx libfunc = NULL_RTX;
      bool check_nan = false;
      rtx dest;

      switch (code)
	{
	case EQ:
	case NE:
	  libfunc = optab_libfunc (eq_optab, mode);
	  break;

	case GT:
	case GE:
	  libfunc = optab_libfunc (ge_optab, mode);
	  break;

	case LT:
	case LE:
	  libfunc = optab_libfunc (le_optab, mode);
	  break;

	case UNORDERED:
	case ORDERED:
	  libfunc = optab_libfunc (unord_optab, mode);
	  code = (code == UNORDERED) ? NE : EQ;
	  break;

	case UNGE:
	case UNGT:
	  check_nan = true;
	  libfunc = optab_libfunc (ge_optab, mode);
	  code = (code == UNGE) ? GE : GT;
	  break;

	case UNLE:
	case UNLT:
	  check_nan = true;
	  libfunc = optab_libfunc (le_optab, mode);
	  code = (code == UNLE) ? LE : LT;
	  break;

	case UNEQ:
	case LTGT:
	  check_nan = true;
	  libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;
	  break;

	default:
	  gcc_unreachable ();
	}

      gcc_assert (libfunc);

      if (!check_nan)
	dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					SImode, op0, mode, op1, mode);
      /* The library signals an exception for signalling NaNs, so we need to
	 handle isgreater, etc. by first checking isordered.  */
      else
	{
	  rtx ne_rtx, normal_dest, unord_dest;
	  rtx unord_func = optab_libfunc (unord_optab, mode);
	  rtx join_label = gen_label_rtx ();
	  rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
	  rtx unord_cmp = gen_reg_rtx (comp_mode);

	  /* Test for either value being a NaN.  */
	  gcc_assert (unord_func);
	  unord_dest = emit_library_call_value (unord_func, NULL_RTX,
						LCT_CONST, SImode,
						op0, mode, op1, mode);

	  /* Set value (0) if either value is a NaN, and jump to the join
	     label.  */
	  dest = gen_reg_rtx (SImode);
	  emit_move_insn (dest, const1_rtx);
	  emit_insn (gen_rtx_SET (unord_cmp,
				  gen_rtx_COMPARE (comp_mode, unord_dest,
						   const0_rtx)));

	  ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
	  emit_jump_insn (gen_rtx_SET (pc_rtx,
				       gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
							     join_ref,
							     pc_rtx)));

	  /* Do the normal comparison, knowing that the values are not
	     NaNs.  */
	  normal_dest = emit_library_call_value (libfunc, NULL_RTX,
						 LCT_CONST, SImode,
						 op0, mode, op1, mode);

	  emit_insn (gen_cstoresi4 (dest,
				    gen_rtx_fmt_ee (code, SImode, normal_dest,
						    const0_rtx),
				    normal_dest, const0_rtx));

	  /* Join NaN and non-NaN paths.  Compare dest against 0.  */
	  emit_label (join_label);
	  code = NE;
	}

      emit_insn (gen_rtx_SET (compare_result,
			      gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && FLOAT128_IBM_P (GET_MODE (op0))
	  && TARGET_HARD_FLOAT)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }
  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
      && !flag_finite_math_only
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT;  or2 = EQ;  break;
	case GE: or1 = GT;  or2 = EQ;  break;
	case UNEQ: or1 = UNORDERED;  or2 = EQ;  break;
	case LTGT: or1 = LT;  or2 = GT;  break;
	case UNGT: or1 = UNORDERED;  or2 = GT;  break;
	case UNLT: or1 = UNORDERED;  or2 = LT;  break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
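/* The CCEQ combination above corresponds to a single CR-logical
   instruction such as `cror': e.g. an LE test is computed as LT | EQ into
   one CR bit, which the eventual branch then tests with EQ.  Compound FP
   conditions therefore cost one extra CR-logical operation rather than a
   second compare.  */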
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
			  const_tree type1,
			  const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
    }

  return NULL;
}
/* Expand floating point conversion to/from __float128 and __ibm128.  */

void
rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode src_mode = GET_MODE (src);
  convert_optab cvt = unknown_optab;
  bool do_move = false;
  rtx libfunc = NULL_RTX;
  rtx dest2;
  typedef rtx (*rtx_2func_t) (rtx, rtx);
  rtx_2func_t hw_convert = (rtx_2func_t)0;
  size_t kf_or_tf;

  struct hw_conversions
  {
    rtx_2func_t from_df;	/* Convert from DFmode.  */
    rtx_2func_t from_sf;	/* Convert from SFmode.  */
    rtx_2func_t from_si_sign;	/* Convert from SImode, signed.  */
    rtx_2func_t from_si_uns;	/* Convert from SImode, unsigned.  */
    rtx_2func_t from_di_sign;	/* Convert from DImode, signed.  */
    rtx_2func_t from_di_uns;	/* Convert from DImode, unsigned.  */
    rtx_2func_t to_df;		/* Convert to DFmode.  */
    rtx_2func_t to_sf;		/* Convert to SFmode.  */
    rtx_2func_t to_si_sign;	/* Convert to SImode, signed.  */
    rtx_2func_t to_si_uns;	/* Convert to SImode, unsigned.  */
    rtx_2func_t to_di_sign;	/* Convert to DImode, signed.  */
    rtx_2func_t to_di_uns;	/* Convert to DImode, unsigned.  */
  } hw_conversions[2] = {
    /* conversions to/from KFmode */
    {
      gen_extenddfkf2_hw,	/* KFmode <- DFmode.  */
      gen_extendsfkf2_hw,	/* KFmode <- SFmode.  */
      gen_float_kfsi2_hw,	/* KFmode <- SImode (signed).  */
      gen_floatuns_kfsi2_hw,	/* KFmode <- SImode (unsigned).  */
      gen_float_kfdi2_hw,	/* KFmode <- DImode (signed).  */
      gen_floatuns_kfdi2_hw,	/* KFmode <- DImode (unsigned).  */
      gen_trunckfdf2_hw,	/* DFmode <- KFmode.  */
      gen_trunckfsf2_hw,	/* SFmode <- KFmode.  */
      gen_fix_kfsi2_hw,		/* SImode <- KFmode (signed).  */
      gen_fixuns_kfsi2_hw,	/* SImode <- KFmode (unsigned).  */
      gen_fix_kfdi2_hw,		/* DImode <- KFmode (signed).  */
      gen_fixuns_kfdi2_hw,	/* DImode <- KFmode (unsigned).  */
    },

    /* conversions to/from TFmode */
    {
      gen_extenddftf2_hw,	/* TFmode <- DFmode.  */
      gen_extendsftf2_hw,	/* TFmode <- SFmode.  */
      gen_float_tfsi2_hw,	/* TFmode <- SImode (signed).  */
      gen_floatuns_tfsi2_hw,	/* TFmode <- SImode (unsigned).  */
      gen_float_tfdi2_hw,	/* TFmode <- DImode (signed).  */
      gen_floatuns_tfdi2_hw,	/* TFmode <- DImode (unsigned).  */
      gen_trunctfdf2_hw,	/* DFmode <- TFmode.  */
      gen_trunctfsf2_hw,	/* SFmode <- TFmode.  */
      gen_fix_tfsi2_hw,		/* SImode <- TFmode (signed).  */
      gen_fixuns_tfsi2_hw,	/* SImode <- TFmode (unsigned).  */
      gen_fix_tfdi2_hw,		/* DImode <- TFmode (signed).  */
      gen_fixuns_tfdi2_hw,	/* DImode <- TFmode (unsigned).  */
    },
  };
  if (dest_mode == src_mode)
    gcc_unreachable ();

  /* Eliminate memory operations.  */
  if (MEM_P (src))
    src = force_reg (src_mode, src);

  if (MEM_P (dest))
    {
      rtx tmp = gen_reg_rtx (dest_mode);
      rs6000_expand_float128_convert (tmp, src, unsigned_p);
      rs6000_emit_move (dest, tmp, dest_mode);
      return;
    }

  /* Convert to IEEE 128-bit floating point.  */
  if (FLOAT128_IEEE_P (dest_mode))
    {
      if (dest_mode == KFmode)
	kf_or_tf = 0;
      else if (dest_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (src_mode)
	{
	case E_DFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_df;
	  break;

	case E_SFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (src_mode))
	    cvt = sext_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Convert from IEEE 128-bit floating point.  */
  else if (FLOAT128_IEEE_P (src_mode))
    {
      if (src_mode == KFmode)
	kf_or_tf = 0;
      else if (src_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (dest_mode)
	{
	case E_DFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_df;
	  break;

	case E_SFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (dest_mode))
	    cvt = trunc_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Both IBM format.  */
  else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
    do_move = true;

  else
    gcc_unreachable ();

  /* Handle conversion between TFmode/KFmode, where both sides use the same
     underlying 128-bit representation and a lowpart move suffices.  */
  if (do_move)
    emit_move_insn (dest, gen_lowpart (dest_mode, src));

  /* Handle conversion if we have hardware support.  */
  else if (TARGET_FLOAT128_HW && hw_convert)
    emit_insn ((hw_convert) (dest, src));

  /* Call an external function to do the conversion.  */
  else if (cvt != unknown_optab)
    {
      libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
      gcc_assert (libfunc != NULL_RTX);

      dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
				       src, src_mode);

      gcc_assert (dest2 != NULL_RTX);
      if (!rtx_equal_p (dest, dest2))
	emit_move_insn (dest, dest2);
    }

  else
    gcc_unreachable ();
}
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}
/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

static rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    return op1;

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
  else
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1,
					  negate_rtx (mode, op2))));

  return scratch;
}
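/* Design note: when OP2 satisfies `logical_operand' the XOR form maps onto
   an xori/xoris immediate; otherwise adding the negated constant is
   cheaper than materializing OP2 just to XOR with it.  Either way SCRATCH
   ends up zero exactly when OP1 == OP2.  */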
void
rs6000_emit_sCOND (machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode,
				     XEXP (condition_rtx, 0), const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand template of the label, or NULL if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }
  /* Maybe we have a guess as to how likely the branch is.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
		   .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer cpus when
	 we have real profile data, as static prediction overrides processor
	 dynamic prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "b%slr%s ", ccode, pred);
  else
    s += sprintf (s, "b%s%s ", ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
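/* Example output (illustrative): a nearby equality branch on cr0 with a
   not-taken hint comes out as "beq- 0,.L5"; once the target is out of
   conditional-branch range, the sense is reversed and the long form
   "bne- 0,$+8\n\tb .L5" hops over an unconditional branch instead.  */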
/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;
      /* FALLTHRU */

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;
	  case GEU:
	    new_code = GTU;
	    break;
	  case LE:
	    new_code = LT;
	    break;
	  case LEU:
	    new_code = LTU;
	    break;
	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	std::swap (op0, op1);

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

    case GE:
    case LE:
      if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
	{
	  /* Invert condition to avoid compound test.  */
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Mark unsigned tests with CCUNSmode.  */
      cc_mode = CCUNSmode;

      /* Invert condition to avoid compound test if necessary.  */
      if (rcode == GEU || rcode == LEU)
	{
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    std::swap (op_true, op_false);

  /* Optimize vec1 == vec2, to know the mask generates -1/0.  */
  if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
      && (GET_CODE (op_true) == CONST_VECTOR
	  || GET_CODE (op_false) == CONST_VECTOR))
    {
      rtx constant_0 = CONST0_RTX (dest_mode);
      rtx constant_m1 = CONSTM1_RTX (dest_mode);

      if (op_true == constant_m1 && op_false == constant_0)
	{
	  emit_move_insn (dest, mask);
	  return 1;
	}

      else if (op_true == constant_0 && op_false == constant_m1)
	{
	  emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
	  return 1;
	}

      /* If we can't use the vector comparison directly, perhaps we can use
	 the mask for the true or false fields, instead of loading up a
	 constant.  */
      if (op_true == constant_m1)
	op_true = mask;

      if (op_false == constant_0)
	op_false = mask;
    }

  if (!REG_P (op_true) && !SUBREG_P (op_true))
    op_true = force_reg (dest_mode, op_true);

  if (!REG_P (op_false) && !SUBREG_P (op_false))
    op_false = force_reg (dest_mode, op_false);

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
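/* The IF_THEN_ELSE above maps onto a vector select (vsel/xxsel): every
   element lane of MASK is all-ones or all-zeros, so the selection is
   effectively dest = (mask & op_true) | (~mask & op_false) computed
   bitwise.  */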
/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP of the operands of the
   last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0
   if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)
    return 0;

  if (code == GE || code == GT)
    max_p = true;
  else if (code == LE || code == LT)
    max_p = false;
  else
    return 0;

  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
    ;

  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
    max_p = !max_p;

  else
    return 0;

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
  return 1;
}
/* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
   XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it is
   zero/false.  Return 0 if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode result_mode = GET_MODE (dest);
  rtx compare_rtx;
  rtx cmove_rtx;
  rtx clobber_rtx;

  if (!can_create_pseudo_p ())
    return 0;

  switch (code)
    {
    case EQ:
    case GE:
    case GT:
      break;

    case NE:
    case LT:
    case LE:
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      return 0;
    }

  /* Generate:	[(parallel [(set (dest)
				 (if_then_else (op (cmp1) (cmp2))
					       (true)
					       (false)))
			    (clobber (scratch))])].  */

  compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
  cmove_rtx = gen_rtx_SET (dest,
			   gen_rtx_IF_THEN_ELSE (result_mode,
						 compare_rtx,
						 true_cond,
						 false_cond));

  clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2, cmove_rtx, clobber_rtx)));

  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* See if we can use the ISA 3.0 (power9) min/max/compare functions.  */
  if (TARGET_P9_MINMAX
      && (compare_mode == SFmode || compare_mode == DFmode)
      && (result_mode == SFmode || result_mode == DFmode))
    {
      if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
	return 1;

      if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
	return 1;
    }

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;
  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE
	  || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond)
	   && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;
  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNEQ:
	code = EQ;
	break;
      case UNGE:
	code = GE;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));

  return 1;
}
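/* fsel semantics, for reference: fsel d,a,b,c sets d = (a >= 0.0) ? b : c,
   with NaN treated as "not >= 0".  Reducing every comparison to a GE
   against zero above is what lets a single fsel (or a short fsel
   sequence for the compound cases) implement the conditional move.  */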
/* Same as above, but for ints (isel).  */

int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	std::swap (false_cond, true_cond);
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
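/* isel semantics, for reference: isel rD,rA,rB,BC copies rA to rD when CR
   bit BC is set and rB otherwise.  Only the LT/GT/EQ (and their unsigned
   variants) bits are directly testable, which is why every other
   condition is reversed above before the isel is emitted.  */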
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* Split a signbit operation on 64-bit machines with direct move.  Also allow
   for the value to come from memory or if it is already loaded into a
   GPR.  */

void
rs6000_split_signbit (rtx dest, rtx src)
{
  machine_mode d_mode = GET_MODE (dest);
  machine_mode s_mode = GET_MODE (src);
  rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
  rtx shift_reg = dest_di;

  gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);

  if (MEM_P (src))
    {
      rtx mem = (WORDS_BIG_ENDIAN
		 ? adjust_address (src, DImode, 0)
		 : adjust_address (src, DImode, 8));
      emit_insn (gen_rtx_SET (dest_di, mem));
    }

  else
    {
      unsigned int r = reg_or_subregno (src);

      if (INT_REGNO_P (r))
	shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));

      else
	{
	  /* Generate the special mfvsrd instruction to get it in a GPR.  */
	  gcc_assert (VSX_REGNO_P (r));
	  if (s_mode == KFmode)
	    emit_insn (gen_signbitkf2_dm2 (dest_di, src));
	  else
	    emit_insn (gen_signbittf2_dm2 (dest_di, src));
	}
    }

  emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_load_lockedqi;
      break;
    case E_HImode:
      fn = gen_load_lockedhi;
      break;
    case E_SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case E_DImode:
      fn = gen_load_lockeddi;
      break;
    case E_TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_store_conditionalqi;
      break;
    case E_HImode:
      fn = gen_store_conditionalhi;
      break;
    case E_SImode:
      fn = gen_store_conditionalsi;
      break;
    case E_DImode:
      fn = gen_store_conditionaldi;
      break;
    case E_TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
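/* Summary of the mapping implemented above: release and acq_rel place a
   `lwsync' before the sequence and seq_cst a full `hwsync'; acquire,
   acq_rel and seq_cst place an `isync' after it.  Relaxed and consume
   need no fences on Power.  */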
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
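/* Worked example (QImode, address 0x1003): ANDing with -4 selects the
   aligned word at 0x1000; (0x1003 << 3) & 0x18 gives a bit offset of 24;
   on big-endian the XOR with 0x18 turns that into 0, reflecting that the
   byte at the highest address is the least-significant byte of the word.
   The insertion mask is then 0xff shifted by that amount.  */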
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  machine_mode mode, orig_mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = memmodel_base (INTVAL (operands[6]));
  mod_f = memmodel_base (INTVAL (operands[7]));
  orig_mode = mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
	 lwarx and shift/mask operations.  With power8, we need to do the
	 comparison in SImode, but the store is still done in QI/HImode.  */
      oldval = convert_modes (SImode, mode, oldval, 1);

      if (!TARGET_SYNC_HI_QI)
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask OLDVAL into position with the word.  */
	  oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  /* Shift and mask NEWVAL into position within the word.  */
	  newval = convert_modes (SImode, mode, newval, 1);
	  newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);
	}

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  if (mode != TImode && !reg_or_short_operand (oldval, mode))
    oldval = copy_to_mode_reg (mode, oldval);

  if (reg_overlap_mentioned_p (retval, newval))
    newval = copy_to_reg (newval);
  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  /* If we have TImode, synthesize a comparison.  */
  if (mode != TImode)
    x = gen_rtx_COMPARE (CCmode, x, oldval);
  else
    {
      rtx xor1_result = gen_reg_rtx (DImode);
      rtx xor2_result = gen_reg_rtx (DImode);
      rtx or_result = gen_reg_rtx (DImode);
      rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
      rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
      rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
      rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);

      emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
      emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
      emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
      x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
    }

  emit_insn (gen_rtx_SET (cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);
  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (orig_mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);
  else if (mode != GET_MODE (operands[1]))
    convert_move (operands[1], retval, 1);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (boolval, x));
}
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = memmodel_base (INTVAL (operands[3]));
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = memmodel_base (INTVAL (model_rtx));
  machine_mode mode = GET_MODE (mem);
  machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
	{
	  val = convert_modes (SImode, mode, val, 1);

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  mode = SImode;
	}
      else
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask VAL into position with the word.  */
	  val = convert_modes (SImode, mode, val, 1);
	  val = expand_simple_binop (SImode, ASHIFT, val, shift,
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  switch (code)
	    {
	    case IOR:
	    case XOR:
	      /* We've already zero-extended VAL.  That is sufficient to
		 make certain that it does not affect other bits.  */
	      mask = NULL;
	      break;

	    case AND:
	      /* If we make certain that all of the other bits in VAL are
		 set, that will be sufficient to not affect other bits.  */
	      x = gen_rtx_NOT (SImode, mask);
	      x = gen_rtx_IOR (SImode, x, val);
	      emit_insn (gen_rtx_SET (val, x));
	      mask = NULL;
	      break;

	    case NOT:
	    case PLUS:
	    case MINUS:
	      /* These will all affect bits outside the field and need
		 adjustment via MASK within the loop.  */
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  store_mode = mode = SImode;
	}
    }
23804 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23806 label
= gen_label_rtx ();
23807 emit_label (label
);
23808 label
= gen_rtx_LABEL_REF (VOIDmode
, label
);
23810 if (before
== NULL_RTX
)
23811 before
= gen_reg_rtx (mode
);
23813 emit_load_locked (mode
, before
, mem
);
23817 x
= expand_simple_binop (mode
, AND
, before
, val
,
23818 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23819 after
= expand_simple_unop (mode
, NOT
, x
, after
, 1);
23823 after
= expand_simple_binop (mode
, code
, before
, val
,
23824 after
, 1, OPTAB_LIB_WIDEN
);
23830 x
= expand_simple_binop (SImode
, AND
, after
, mask
,
23831 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23832 x
= rs6000_mask_atomic_subword (before
, x
, mask
);
23834 else if (store_mode
!= mode
)
23835 x
= convert_modes (store_mode
, mode
, x
, 1);
23837 cond
= gen_reg_rtx (CCmode
);
23838 emit_store_conditional (store_mode
, cond
, mem
, x
);
23840 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23841 emit_unlikely_jump (x
, label
);
23843 rs6000_post_atomic_barrier (model
);
23847 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23848 then do the calcuations in a SImode register. */
23850 rs6000_finish_atomic_subword (orig_before
, before
, shift
);
23852 rs6000_finish_atomic_subword (orig_after
, after
, shift
);
23854 else if (store_mode
!= mode
)
23856 /* QImode/HImode on machines with lbarx/lharx where we do the native
23857 operation and then do the calcuations in a SImode register. */
23859 convert_move (orig_before
, before
, 1);
23861 convert_move (orig_after
, after
, 1);
23863 else if (orig_after
&& after
!= orig_after
)
23864 emit_move_insn (orig_after
, after
);
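/* Illustrative only (editor's note): on a target without lbarx/lharx, a
   subword read-modify-write such as

     unsigned char c;
     (void) __atomic_fetch_or (&c, 0x80, __ATOMIC_RELAXED);

   goes through the expander above as a full-word lwarx/stwcx. loop with
   VAL shifted into the containing word; the IOR and XOR cases need no
   MASK adjustment inside the loop because VAL has already been
   zero-extended.  */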
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx,
						       nsrc)
				: gen_movsi_update (breg, breg, delta_rtx,
						    nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx,
						    nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg,
						   offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
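/* Editor's illustration of the splitter above: a TImode register move on a
   64-bit target whose ranges overlap destructively, e.g.

     (set (reg:TI 4) (reg:TI 3))

   is emitted backwards as (set (reg:DI 5) (reg:DI 4)) followed by
   (set (reg:DI 4) (reg:DI 3)), so no word is clobbered before it has been
   copied.  */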
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Determine whether the REG is really used.  */

static bool
save_reg_p (int reg)
{
  /* We need to mark the PIC offset register live for the same conditions
     as it is set up, or otherwise it won't be saved before we clobber it.  */

  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
    {
      /* When calling eh_return, we must return true for all the cases
	 where conditional_register_usage marks the PIC offset reg
	 call used or fixed.  */
      if (TARGET_TOC && TARGET_MINIMAL_TOC
	  && (crtl->calls_eh_return
	      || df_regs_ever_live_p (reg)
	      || !constant_pool_empty_p ()))
	return true;

      if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
	  && flag_pic)
	return true;
    }

  return !call_used_regs[reg] && df_regs_ever_live_p (reg);
}

/* Return the first fixed-point register that is required to be
   saved. 32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}

/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}

/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}

/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
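/* Editor's note, for illustration: ALTIVEC_REG_BIT maps V0 to the
   most-significant bit of the mask, so a function in which only V20 is
   ever live computes mask == (0x80000000 >> 20) == 0x00000800.  */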
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info)
{
  info->world_save_p = 1;
  info->world_save_p
    = (WORLD_SAVE_P (info)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info))
    {
      rtx_insn *insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (CALL_P (insn) && SIBLING_CALL_P (insn))
	  {
	    info->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it. */
      info->vrsave_size  = 4;

      /* If we are going to save the world, we need to save the link register
	 too.  */
      info->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info->vrsave_mask == 0)
	info->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }

  return;
}


static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}


/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};

static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;

  /* Select between in-line and out-of-line save and restore of regs.
     First, all the obvious cases where we don't use out-of-line.  */
  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_gp_reg_save == 32)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
	  || info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS);

  /* Don't ever restore fixed regs.  That means we can't use the
     out-of-line register restore functions if a fixed reg is in the
     range of regs restored.  */
  if (!(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* We can only use the out-of-line routines to restore fprs if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  Of course, if we have saved
     out-of-line then we know we haven't skipped any fprs.  */
  if ((strategy & SAVE_INLINE_FPRS)
      && !(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* Similarly, for altivec regs.  */
  if (!(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  if ((strategy & SAVE_INLINE_VRS)
      && !(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gprs).  */
  bool lr_save_p = (info->lr_save_p
		    || !(strategy & SAVE_INLINE_FPRS)
		    || !(strategy & SAVE_INLINE_VRS)
		    || !(strategy & REST_INLINE_FPRS)
		    || !(strategy & REST_INLINE_VRS));

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && info->first_gp_reg_save < 31
      && !(flag_shrink_wrap
	   && flag_shrink_wrap_separate
	   && optimize_function_for_speed_p (cfun)))
    {
      int count = 0;
      for (int i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i))
	  count++;

      if (count <= 1)
	/* Don't use store multiple if only one reg needs to be
	   saved.  This can occur for example when the ABI_V4 pic reg
	   (r30) needs to be saved to make calls, but r31 is not
	   used.  */
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      else
	{
	  /* Prefer store multiple for saves over out-of-line
	     routines, since the store-multiple instruction will
	     always be smaller.  */
	  strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;

	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; In those cases it is advantageous to use load
	     multiple when available.  */
	  if (info->first_fp_reg_save != 64 || !lr_save_p)
	    strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
	}
    }

  /* Using the "exit" out-of-line routine does not improve code size
     if using it would require lr to be saved and if only saving one
     or two gprs.  */
  else if (!lr_save_p && info->first_gp_reg_save > 29)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  /* Don't ever restore fixed regs.  */
  if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (fixed_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  /* We can only use load multiple or the out-of-line routines to
     restore gprs if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.
     Of course, if we have saved out-of-line or used stmw then we know
     we haven't skipped any gprs.  */
  if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
      && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
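/* Editor's illustration: the returned strategy is a plain bitmask, so a
   V.4 function compiled without -Os that touches no FPRs or AltiVec regs
   typically comes back as

     SAVE_INLINE_GPRS | REST_INLINE_GPRS
     | SAVE_INLINE_FPRS | REST_INLINE_FPRS
     | SAVE_INLINE_VRS | REST_INLINE_VRS

   i.e. every save and restore is emitted inline in the prologue and
   epilogue.  */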
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

	SP---->	+---------------------------------------+
		| Back chain to caller			|  0
		+---------------------------------------+
		| Save area for CR			|  8
		+---------------------------------------+
		| Saved LR				|  16
		+---------------------------------------+
		| Saved TOC pointer			|  24
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	|  32
		+---------------------------------------+
		| Alloca space (A)			|  32+P
		+---------------------------------------+
		| Local variable space (L)		|  32+P+A
		+---------------------------------------+
		| Save area for AltiVec registers (W)	|  32+P+A+L
		+---------------------------------------+
		| AltiVec alignment padding (Y)		|  32+P+A+L+W
		+---------------------------------------+
		| Save area for GP registers (G)	|  32+P+A+L+W+Y
		+---------------------------------------+
		| Save area for FP registers (F)	|  32+P+A+L+W+Y+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|  32+P+A+L+W+Y+G+F
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present and the required alignment is
       16 bytes, the parameter save area is padded so that the
       alloca area starts 16-byte aligned.

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
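/* Editor's note, for illustration: the RS6000_ALIGN used throughout the
   size computations below rounds up to the given power-of-two boundary,
   e.g. RS6000_ALIGN (20, 16) == 32, so a 20-byte local area consumes a
   full 32 bytes once quadword alignment is applied.  */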
#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif

static rs6000_stack_t *
rs6000_stack_info (void)
{
  /* We should never be called for thunks, we are not set up for that.  */
  gcc_assert (!cfun->is_thunk);

  rs6000_stack_t *info = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info->reload_completed)
    return info;

  memset (info, 0, sizeof (*info));
  info->reload_completed = reload_completed;

  /* Select which calling sequence.  */
  info->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info->first_gp_reg_save;

  info->gp_size = reg_size * (32 - first_gp);

  info->first_fp_reg_save = first_fp_reg_to_save ();
  info->fp_size = 8 * (64 - info->first_fp_reg_save);

  info->first_altivec_reg_save = first_altivec_reg_to_save ();
  info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
			     - info->first_altivec_reg_save);

  /* Does this function call anything?  */
  info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (save_reg_p (CR2_REGNO)
      || save_reg_p (CR3_REGNO)
      || save_reg_p (CR4_REGNO))
    {
      info->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      ehrd_size = i * UNITS_PER_WORD;
    }
  else
    ehrd_size = 0;

  /* In the ELFv2 ABI, we also need to allocate space for separate
     CR field save areas if the function calls __builtin_eh_return.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      /* This hard-codes that we have three call-saved CR fields.  */
      ehcr_size = 3 * reg_size;
      /* We do *not* use the regular CR save mechanism.  */
      info->cr_save_p = 0;
    }
  else
    ehcr_size = 0;

  /* Determine various sizes.  */
  info->reg_size     = reg_size;
  info->fixed_size   = RS6000_SAVE_AREA;
  info->vars_size    = RS6000_ALIGN (get_frame_size (), 8);
  if (cfun->calls_alloca)
    info->parm_size  =
      RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
		    STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
  else
    info->parm_size  = RS6000_ALIGN (crtl->outgoing_args_size,
				     TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info->vars_size
      += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info->fixed_size + info->vars_size + info->parm_size);

  if (TARGET_ALTIVEC_ABI)
    info->vrsave_mask = compute_vrsave_mask ();

  if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
    info->vrsave_size = 4;

  compute_save_world_info (info);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = info->vrsave_save_offset & 0xF;

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;
	  gcc_assert (info->altivec_size == 0
		      || info->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset - ehrd_size;
	}
      else
	info->ehrd_offset = info->gp_save_offset - ehrd_size;

      info->ehcr_offset = info->ehrd_offset - ehcr_size;
      info->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;
      info->cr_save_offset = info->gp_save_offset - info->cr_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset;
	}
      else
	info->ehrd_offset = info->cr_save_offset;

      info->ehrd_offset -= ehrd_size;
      info->lr_save_offset = reg_size;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info->save_size = RS6000_ALIGN (info->fp_size
				  + info->gp_size
				  + info->altivec_size
				  + info->altivec_padding_size
				  + ehrd_size
				  + ehcr_size
				  + info->cr_size
				  + info->vrsave_size,
				  save_align);

  non_fixed_size = info->vars_size + info->parm_size + info->save_size;

  info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
				   ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info->calls_p
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && !constant_pool_empty_p ())
#endif
      || rs6000_ra_ever_killed ())
    info->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);

  if (!(info->savres_strategy & SAVE_INLINE_GPRS)
      || !(info->savres_strategy & SAVE_INLINE_FPRS)
      || !(info->savres_strategy & SAVE_INLINE_VRS)
      || !(info->savres_strategy & REST_INLINE_GPRS)
      || !(info->savres_strategy & REST_INLINE_FPRS)
      || !(info->savres_strategy & REST_INLINE_VRS))
    info->lr_save_p = 1;

  if (info->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info->calls_p)
    info->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info->push_p = 1;

  else
    info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  return info;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (!info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";	break;
    case ABI_NONE:	 abi_string = "NONE";		break;
    case ABI_AIX:	 abi_string = "AIX";		break;
    case ABI_ELFv2:	 abi_string = "ELFv2";		break;
    case ABI_DARWIN:	 abi_string = "Darwin";		break;
    case ABI_V4:	 abi_string = "V.4";		break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_size)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_size)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n",
	     info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
     is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      if (count == 0)
	/* FRAME is set to frame_pointer_rtx by the generic code, but that
	   is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD.  */
	frame = stack_pointer_rtx;
      rtx prev_frame_addr = memory_address (Pmode, frame);
      rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
      rtx lr_save_off = plus_constant (Pmode,
				       prev_frame, RETURN_ADDRESS_OFFSET);
      rtx lr_save_addr = memory_address (Pmode, lr_save_off);
      return gen_rtx_MEM (Pmode, lr_save_addr);
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
25086 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
25091 fntype
= TREE_TYPE (decl
);
25093 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
25095 /* We can't do it if the called function has more vector parameters
25096 than the current function; there's nowhere to put the VRsave code. */
25097 if (TARGET_ALTIVEC_ABI
25098 && TARGET_ALTIVEC_VRSAVE
25099 && !(decl
&& decl
== current_function_decl
))
25101 function_args_iterator args_iter
;
25105 /* Functions with vector parameters are required to have a
25106 prototype, so the argument type info must be available
25108 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
25109 if (TREE_CODE (type
) == VECTOR_TYPE
25110 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25113 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
25114 if (TREE_CODE (type
) == VECTOR_TYPE
25115 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25122 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25123 functions, because the callee may have a different TOC pointer to
25124 the caller and there's no way to ensure we restore the TOC when
25125 we return. With the secure-plt SYSV ABI we can't make non-local
25126 calls when -fpic/PIC because the plt call stubs use r30. */
25127 if (DEFAULT_ABI
== ABI_DARWIN
25128 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25130 && !DECL_EXTERNAL (decl
)
25131 && !DECL_WEAK (decl
)
25132 && (*targetm
.binds_local_p
) (decl
))
25133 || (DEFAULT_ABI
== ABI_V4
25134 && (!TARGET_SECURE_PLT
25137 && (*targetm
.binds_local_p
) (decl
)))))
25139 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
25141 if (!lookup_attribute ("longcall", attr_list
)
25142 || lookup_attribute ("shortcall", attr_list
))
static bool
rs6000_ra_ever_killed (void)
{
  rtx_insn *top;
  rtx reg;
  rtx_insn *insn;

  if (cfun->is_thunk)
    return false;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return true;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return true;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return true;
	}
    }
  return false;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	{
	  got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	}
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));

      need_toc_init = 1;
      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.
   Return 2 instead of 1 if the load_toc_* pattern is in the function
   partition that doesn't start the function.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx_insn *insn;
  int ret = 1;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);
	  int i;

	  if (GET_CODE (pat) == PARALLEL)
	    for (i = 0; i < XVECLEN (pat, 0); i++)
	      {
		rtx sub = XVECEXP (pat, 0, i);
		if (GET_CODE (sub) == USE)
		  {
		    sub = XEXP (sub, 0);
		    if (GET_CODE (sub) == UNSPEC
			&& XINT (sub, 1) == UNSPEC_TOC)
		      return ret;
		  }
	      }
	}
      else if (crtl->has_bb_partition
	       && NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	ret = 2;
    }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
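/* Editor's note, for illustration: under -mcmodel=medium or -mcmodel=large
   the HIGH/LO_SUM pair built above corresponds to the usual two-insn TOC
   address sequence

	addis rX, r2, sym@toc@ha
	addi  rY, rX, sym@toc@l

   while CMODEL_SMALL keeps a single 16-bit TOC-relative displacement.  */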
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Allocate SIZE_INT bytes on the stack using a store with update style insn
   and set the appropriate attributes for the generated insn.  Return the
   first insn which adjusts the stack pointer or the last insn before
   the stack adjustment loop.

   SIZE_INT is used to create the CFI note for the allocation.

   SIZE_RTX is an rtx containing the size of the adjustment.  Note that
   since stacks grow to lower addresses its runtime value is -SIZE_INT.

   ORIG_SP contains the backchain value that must be stored at *sp.  */

static rtx_insn *
rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
{
  rtx_insn *insn;

  rtx size_rtx = GEN_INT (-size_int);
  if (size_int > 32767)
    {
      rtx tmp_reg = gen_rtx_REG (Pmode, 0);
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, size_rtx);
      try_split (PATTERN (insn), insn, 0);
      size_rtx = tmp_reg;
    }

  if (Pmode == SImode)
    insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
					      stack_pointer_rtx,
					      size_rtx,
					      orig_sp));
  else
    insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
						 stack_pointer_rtx,
						 size_rtx,
						 orig_sp));

  rtx par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  rtx set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  rtx mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (stack_pointer_rtx,
			     gen_rtx_PLUS (Pmode,
					   stack_pointer_rtx,
					   GEN_INT (-size_int))));

  /* Emit a blockage to ensure the allocation/probing insns are
     not optimized, combined, removed, etc.  Add REG_STACK_CHECK
     note for similar reasons.  */
  if (flag_stack_clash_protection)
    {
      add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
      emit_insn (gen_blockage ());
    }

  return insn;
}
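/* Editor's note, for illustration: on a 64-bit target the insn emitted by
   the routine above is the classic back-chain allocation

	stdu r1, -SIZE(r1)

   which stores the old stack pointer at the new stack top and decrements
   r1 in a single instruction, so each new page is touched as it is
   allocated.  */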
static HOST_WIDE_INT
get_stack_clash_protection_probe_interval (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
}

static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
}
/* Allocate ORIG_SIZE bytes on the stack and probe the newly
   allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.

   COPY_REG, if non-null, should contain a copy of the original
   stack pointer at exit from this function.

   This is subtly different than the Ada probing in that it tries hard to
   prevent attacks that jump the stack guard.  Thus it is never allowed
   to allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
   space without a suitable probe.  */
static rtx_insn *
rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
					   rtx copy_reg)
{
  rtx orig_sp = copy_reg;

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  /* Round the size down to a multiple of PROBE_INTERVAL.  */
  HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);

  /* If explicitly requested,
       or the rounded size is not the same as the original size
       or the rounded size is greater than a page,
     then we will need a copy of the original stack pointer.  */
  if (rounded_size != orig_size
      || rounded_size > probe_interval
      || copy_reg)
    {
      /* If the caller did not request a copy of the incoming stack
	 pointer, then we use r0 to hold the copy.  */
      if (!copy_reg)
	orig_sp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (orig_sp, stack_pointer_rtx);
    }

  /* There are three cases here.

     One is a single probe which is the most common and most efficiently
     implemented as it does not have to have a copy of the original
     stack pointer if there are no residuals.

     Second is unrolled allocation/probes which we use if there's just
     a few of them.  It needs to save the original stack pointer into a
     temporary for use as a source register in the allocation/probe.

     Last is a loop.  This is the most uncommon case and least efficient.  */
  rtx_insn *retval = NULL;
  if (rounded_size == probe_interval)
    {
      retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else if (rounded_size <= 8 * probe_interval)
    {
      /* The ABI requires using the store with update insns to allocate
	 space and store the backchain into the stack.

	 So we save the current stack pointer into a temporary, then
	 emit the store-with-update insns to store the saved stack pointer
	 into the right location in each new page.  */
      for (int i = 0; i < rounded_size; i += probe_interval)
	{
	  rtx_insn *insn
	    = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);

	  /* Save the first stack adjustment in RETVAL.  */
	  if (i == 0)
	    retval = insn;
	}

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else
    {
      /* Compute the ending address.  */
      rtx end_addr
	= copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
      rtx rs = GEN_INT (-rounded_size);
      rtx_insn *insn;
      if (add_operand (rs, Pmode))
	insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
      else
	{
	  emit_move_insn (end_addr, GEN_INT (-rounded_size));
	  insn = emit_insn (gen_add3_insn (end_addr, end_addr,
					   stack_pointer_rtx));
	  /* Describe the effect of INSN to the CFI engine.  */
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (end_addr,
				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						   rs)));
	}
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Emit the loop.  */
      if (TARGET_64BIT)
	retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
						     stack_pointer_rtx,
						     orig_sp, end_addr));
      else
	retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
						     stack_pointer_rtx,
						     orig_sp, end_addr));
      RTX_FRAME_RELATED_P (retval) = 1;
      /* Describe the effect of INSN to the CFI engine.  */
      add_reg_note (retval, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx, end_addr));

      /* Emit a blockage to ensure the allocation/probing insns are
	 not optimized, combined, removed, etc.  Other cases handle this
	 within their call to rs6000_emit_allocate_stack_1.  */
      emit_insn (gen_blockage ());

      dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
    }

  if (orig_size != rounded_size)
    {
      /* Allocate (and implicitly probe) any residual space.  */
      HOST_WIDE_INT residual = orig_size - rounded_size;

      rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);

      /* If the residual was the only allocation, then we can return the
	 allocating insn.  */
      if (!retval)
	retval = insn;
    }

  return retval;
}
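/* Worked example (illustrative, assuming the default 4kB probe interval):
   for ORIG_SIZE = 0x4800 and PROBE_INTERVAL = 0x1000, ROUND_DOWN gives
   ROUNDED_SIZE = 0x4000, which is more than one but no more than eight
   intervals, so the unrolled case above emits four store-with-update
   allocations of 0x1000 bytes each, and the 0x800-byte residual is then
   allocated (and thereby probed) by the final
   rs6000_emit_allocate_stack_1 call.  */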
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static rtx_insn *
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx_insn *insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return 0;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  rtx_insn *insn
	    = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
	  gcc_assert (insn);
	  emit_insn (insn);
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4
	       && !flag_pic)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (flag_stack_clash_protection)
    {
      if (size < get_stack_clash_protection_guard_size ())
	dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      else
	{
	  rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
								      copy_reg);

	  /* If we asked for a copy with an offset, then we still need add in
	     the offset.  */
	  if (copy_reg && copy_off)
	    emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
	  return insn;
	}
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
  return insn;
}
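/* Illustrative reading of the overflow check above: with 32-bit Pmode, a
   (pathological) SIZE of 0x100000000 truncates to 0 in gen_int_mode, so
   INTVAL (todec) != -size detects the wrap and we emit a trap rather
   than silently allocating the wrong amount.  */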
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
						  -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
						   -rounded_size)));

      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
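/* Example (illustrative): with PROBE_INTERVAL = 0x1000, FIRST = 0x2000 and
   SIZE = 0x2500, the small case applies (0x4500 <= 32768) and emits
   probes at sp-0x3000, sp-0x4000, and finally sp-0x4500.  */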
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   addresses, not offsets.  */

static const char *
output_probe_stack_range_1 (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  /* Branch.  */
  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
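/* The emitted loop looks like this (illustrative, assuming REG1 is r12,
   REG2 is r0, PROBE_INTERVAL is 4096, and a 64-bit target):

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpd 0,12,0
		bne 0,.LPSRL0
   */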
/* This function is called when rs6000_frame_related is processing
   SETs within a PARALLEL, and returns whether the REGNO save ought to
   be marked RTX_FRAME_RELATED_P.  The PARALLELs involved are those
   for out-of-line register save functions, store multiple, and the
   Darwin world_save.  They may contain registers that don't really
   need saving.  */

static bool
interesting_frame_related_regno (unsigned int regno)
{
  /* Saves apparently of r0 are actually saving LR.  It doesn't make
     sense to substitute the regno here to test save_reg_p (LR_REGNO).
     We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
     (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
     as frame related.  */
  if (regno == 0)
    return true;

  /* If we see CR2 then we are here on a Darwin world save.  Saves of
     CR2 signify the whole CR is being saved.  This is a long-standing
     ABI wart fixed by ELFv2.  As for r0/lr there is no need to check
     that CR needs to be saved.  */
  if (regno == CR2_REGNO)
    return true;

  /* Omit frame info for any user-defined global regs.  If frame info
     is supplied for them, frame unwinding will restore a user reg.
     Also omit frame info for any reg we don't need to save, as that
     bloats frame info and can cause problems with shrink wrapping.
     Since global regs won't be seen as needing to be saved, both of
     these conditions are covered by save_reg_p.  */
  return save_reg_p (regno);
}
/* Probe a range of stack addresses from REG1 to REG3 inclusive.  These are
   addresses, not offsets.

   REG2 contains the backchain that must be stored into *sp at each allocation.

   This is subtly different than the Ada probing above in that it tries hard
   to prevent attacks that jump the stack guard.  Thus, it is never allowed
   to allocate more than PROBE_INTERVAL bytes of stack space without a
   suitable probe.  */

static const char *
output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* This allocates and probes.  */
  xops[0] = reg1;
  xops[1] = reg2;
  xops[2] = GEN_INT (-probe_interval);
  if (TARGET_64BIT)
    output_asm_insn ("stdu %1,%2(%0)", xops);
  else
    output_asm_insn ("stwu %1,%2(%0)", xops);

  /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg3;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
/* Wrapper around the output_probe_stack_range routines.  */
const char *
output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
{
  if (flag_stack_clash_protection)
    return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
  return output_probe_stack_range_1 (reg1, reg3);
}
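/* Design note: both variants emit a compare/branch loop, but the Ada-style
   variant walks a scratch pointer down an already-allocated region and
   stores r0 at each interval, while the stack-clash variant advances the
   pointer itself with a store-with-update of the backchain (REG2), so
   every new page is written as it is allocated.  */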
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx_insn *
rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx repl2)
{
  rtx repl;

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    {
      gcc_checking_assert (val == 0);
      repl = NULL_RTX;
    }
  else
    repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			 GEN_INT (val));

  rtx pat = PATTERN (insn);
  if (!repl && !reg2)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      if (GET_CODE (pat) == PARALLEL)
	for (int i = 0; i < XVECLEN (pat, 0); i++)
	  if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	    {
	      rtx set = XVECEXP (pat, 0, i);

	      if (!REG_P (SET_SRC (set))
		  || interesting_frame_related_regno (REGNO (SET_SRC (set))))
		RTX_FRAME_RELATED_P (set) = 1;
	    }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* We expect that 'pat' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.
     Call simplify_replace_rtx on the SETs rather than the whole insn
     so as to leave the other stuff alone (for example USE of r12).  */

  set_used_flags (pat);
  if (GET_CODE (pat) == SET)
    {
      if (repl)
	pat = simplify_replace_rtx (pat, reg, repl);
      if (reg2)
	pat = simplify_replace_rtx (pat, reg2, repl2);
    }
  else if (GET_CODE (pat) == PARALLEL)
    {
      pat = shallow_copy_rtx (pat);
      XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));

      for (int i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (pat, 0, i);

	    if (repl)
	      set = simplify_replace_rtx (set, reg, repl);
	    if (reg2)
	      set = simplify_replace_rtx (set, reg2, repl2);
	    XVECEXP (pat, 0, i) = set;

	    if (!REG_P (SET_SRC (set))
		|| interesting_frame_related_regno (REGNO (SET_SRC (set))))
	      RTX_FRAME_RELATED_P (set) = 1;
	  }
    }
  else
    gcc_unreachable ();

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));

  return insn;
}
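/* Example (illustrative): for a save of r31 through a frame register r11
   that was set up as r1+0x8000, passing REG=r11 and VAL=0x8000 rewrites
   (set (mem (plus r11 -8)) (reg 31)) in the REG_FRAME_RELATED_EXPR note
   as (set (mem (plus (plus r1 0x8000) -8)) (reg 31)), which the CFI
   machinery can then fold to a save at r1+0x7ff8.  */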
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
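/* Usage sketch (illustrative): gen_frame_store (gen_rtx_REG (Pmode, 31),
   sp_reg, -8) yields (set (mem/c (plus r1 -8)) (reg 31)), ready to be
   wrapped in emit_insn by callers such as emit_frame_save below.  */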
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
		       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}

/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

enum {
  SAVRES_LR = 0x1,
  SAVRES_SAVE = 0x2,
  SAVRES_REG = 0x0c,
  SAVRES_GPR = 0,
  SAVRES_FPR = 4,
  SAVRES_VR = 8
};

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
	abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
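/* Examples of synthesized names (illustrative): 32-bit SVR4 restoring
   GPRs from r29 with the "exit" (LR-restoring) variant gives
   "_restgpr_29_x"; ELFv2 saving GPRs from r26 while also saving LR gives
   "_savegpr0_26"; Darwin saving FPRs from f20 gives
   "*saveFP+24 ; save f20-f31".  */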
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (regno, sel);

      sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx_insn *
rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  /* If there is nothing to do, don't do anything.  */
  if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
    return NULL;

  rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4)
    return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
					     GEN_INT (frame_off)));

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));
  else
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
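/* Summarizing the mapping above (illustrative):
     AIX/ELFv2: FPR routines, and any routine that touches LR, use r1;
		other (GPR/VR) routines use r12.
     Darwin:    FPR routines use r1; GPR/VR routines use r11.
     V.4:       everything uses r11.  */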
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx_insn *
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par;
  rtx_insn *insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
/* Emit prologue code to store CR fields that need to be saved into REG.  This
   function should only be called when moving the non-volatile CRs to REG, it
   is not a general purpose routine to move the entire set of CRs to REG.
   Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
   volatile CRs.  */

static void
rs6000_emit_prologue_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_prologue_movesi_from_cr (reg));
}
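/* For example (illustrative): if only CR2 must be saved, the emitted
   pattern is (set reg (unspec [(reg:CC CR2) (const_int 0x20)]
   UNSPEC_MOVESI_FROM_CR)), i.e. a single mfcrf with field mask
   1 << (7 - 2) = 0x20 rather than a full mfcr.  */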
/* Return whether the split-stack arg pointer (r12) is used.  */

static bool
split_stack_arg_pointer_used_p (void)
{
  /* If the pseudo holding the arg pointer is no longer a pseudo,
     then the arg pointer is used.  */
  if (cfun->machine->split_stack_arg_pointer != NULL_RTX
      && (!REG_P (cfun->machine->split_stack_arg_pointer)
	  || (REGNO (cfun->machine->split_stack_arg_pointer)
	      < FIRST_PSEUDO_REGISTER)))
    return true;

  /* Unfortunately we also need to do some code scanning, since
     r12 may have been substituted for the pseudo.  */
  rtx_insn *insn;
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	/* A call destroys r12.  */
	if (CALL_P (insn))
	  return false;

	df_ref use;
	FOR_EACH_INSN_USE (use, insn)
	  {
	    rtx x = DF_REF_REG (use);
	    if (REG_P (x) && REGNO (x) == 12)
	      return true;
	  }
	df_ref def;
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    rtx x = DF_REF_REG (def);
	    if (REG_P (x) && REGNO (x) == 12)
	      return false;
	  }
      }
  return bitmap_bit_p (DF_LR_OUT (bb), 12);
}
/* Return whether we need to emit an ELFv2 global entry point prologue.  */

static bool
rs6000_global_entry_point_needed_p (void)
{
  /* Only needed for the ELFv2 ABI.  */
  if (DEFAULT_ABI != ABI_ELFv2)
    return false;

  /* With -msingle-pic-base, we assume the whole program shares the same
     TOC, so no global entry point prologues are needed anywhere.  */
  if (TARGET_SINGLE_PIC_BASE)
    return false;

  /* Ensure we have a global entry point for thunks.  ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (cfun->is_thunk)
    return true;

  /* For regular functions, rs6000_emit_prologue sets this flag if the
     routine ever uses the TOC pointer.  */
  return cfun->machine->r2_setup_needed;
}
/* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS.  */
static sbitmap
rs6000_get_separate_components (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (WORLD_SAVE_P (info))
    return NULL;

  gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
	      && !(info->savres_strategy & REST_MULTIPLE));

  /* Component 0 is the save/restore of LR (done via GPR0).
     Components 13..31 are the save/restore of GPR13..GPR31.
     Components 46..63 are the save/restore of FPR14..FPR31.  */

  cfun->machine->n_components = 64;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  int reg_size = TARGET_32BIT ? 4 : 8;
  int fp_reg_size = 8;

  /* The GPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS))
    {
      int offset = info->gp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff)
	      && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += reg_size;
	}
    }

  /* Don't mess with the hard frame pointer.  */
  if (frame_pointer_needed)
    bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);

  /* Don't mess with the fixed TOC register.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC)
      || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
      || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
    bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);

  /* The FPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS))
    {
      int offset = info->fp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += fp_reg_size;
	}
    }

  /* Optimize LR save and restore if we can.  This is component 0.  Any
     out-of-line register save/restore routines need LR.  */
  if (info->lr_save_p
      && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
      && (info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS)
      && (info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS)
      && (info->savres_strategy & SAVE_INLINE_VRS)
      && (info->savres_strategy & REST_INLINE_VRS))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;
      if (IN_RANGE (offset, -0x8000, 0x7fff))
	bitmap_set_bit (components, 0);
    }

  return components;
}
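/* Concretely (illustrative): a 64-bit function whose first saved GPR is
   r28 and first saved FPR is f29 can get components {0, 28..31, 61..63},
   where 0 is the LR save and an FPR regno i simply maps to component i
   (f29 is hard regno 61), matching the numbering described above.  */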
/* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB.  */
static sbitmap
rs6000_components_for_bb (basic_block bb)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  bitmap in = DF_LIVE_IN (bb);
  bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
  bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  /* A register is used in a bb if it is in the IN, GEN, or KILL sets.  */

  for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* The link register.  */
  if (bitmap_bit_p (in, LR_REGNO)
      || bitmap_bit_p (gen, LR_REGNO)
      || bitmap_bit_p (kill, LR_REGNO))
    bitmap_set_bit (components, 0);

  return components;
}
/* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS.  */
static void
rs6000_disqualify_components (sbitmap components, edge e,
			      sbitmap edge_components, bool /*is_prologue*/)
{
  /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
     live where we want to place that code.  */
  if (bitmap_bit_p (edge_components, 0)
      && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
    {
      if (dump_file)
	fprintf (dump_file, "Disqualifying LR because GPR0 is live "
		 "on entry to bb %d\n", e->dest->index);
      bitmap_clear_bit (components, 0);
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS.  */
static void
rs6000_emit_prologue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Prologue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_REGISTER, NULL);

      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
      rtx mem = copy_rtx (SET_DEST (single_set (insn)));
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
    }

  /* Prologue for the GPRs.  */
  int offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += reg_size;
    }

  /* Prologue for the FPRs.  */
  offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += fp_reg_size;
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS.  */
static void
rs6000_emit_epilogue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;

  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Epilogue for the FPRs.  */
  int offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += fp_reg_size;
    }

  /* Epilogue for the GPRs.  */
  offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += reg_size;
    }

  /* Epilogue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));

      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      insn = emit_move_insn (lr, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_RESTORE, lr);
    }
}
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */
static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */
static void
emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
		      HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  /* Get VRSAVE into a GPR.  */
  rtx reg = gen_rtx_REG (SImode, save_regno);
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
  if (TARGET_MACHO)
    emit_insn (gen_get_vrsave_internal (reg));
  else
    emit_insn (gen_rtx_SET (reg, vrsave));

  /* Save VRSAVE.  */
  int offset = info->vrsave_save_offset + frame_off;
  emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

  /* Include the registers in the mask.  */
  emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));

  emit_insn (generate_set_vrsave (reg, info, 0));
}
/* Set up the arg pointer (r12) for -fsplit-stack code.  If __morestack was
   called, it left the arg pointer to the old stack in r29.  Otherwise, the
   arg pointer is the top of the current frame.  */
static void
emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
			   HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  cfun->machine->split_stack_argp_used = true;

  if (sp_adjust)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
      emit_insn_before (set_r12, sp_adjust);
    }
  else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      if (frame_off == 0)
	emit_move_insn (r12, frame_reg_rtx);
      else
	emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
    }

  if (info->push_p)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r29 = gen_rtx_REG (Pmode, 29);
      rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
      rtx not_more = gen_label_rtx ();
      rtx jump;

      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, not_more),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = not_more;
      LABEL_NUSES (not_more) += 1;
      emit_move_insn (r12, r29);
      emit_label (not_more);
    }
}
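/* Control-flow sketch (illustrative): the GEU test on cr7 relies on the
   split-stack convention that the __morestack path leaves a condition in
   cr7 indicating whether a new stack segment was allocated; when it was
   not, the branch to NOT_MORE skips the copy and r12 keeps the
   current-frame value computed above, otherwise r29 (the old stack's arg
   pointer) is copied into r12.  */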
26958 /* Emit function prologue as insns. */
26961 rs6000_emit_prologue (void)
26963 rs6000_stack_t
*info
= rs6000_stack_info ();
26964 machine_mode reg_mode
= Pmode
;
26965 int reg_size
= TARGET_32BIT
? 4 : 8;
26966 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26968 int fp_reg_size
= 8;
26969 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26970 rtx frame_reg_rtx
= sp_reg_rtx
;
26971 unsigned int cr_save_regno
;
26972 rtx cr_save_rtx
= NULL_RTX
;
26975 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
26976 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
26977 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
26978 int using_split_stack
= (flag_split_stack
26979 && (lookup_attribute ("no_split_stack",
26980 DECL_ATTRIBUTES (cfun
->decl
))
26983 /* Offset to top of frame for frame_reg and sp respectively. */
26984 HOST_WIDE_INT frame_off
= 0;
26985 HOST_WIDE_INT sp_off
= 0;
26986 /* sp_adjust is the stack adjusting instruction, tracked so that the
26987 insn setting up the split-stack arg pointer can be emitted just
26988 prior to it, when r12 is not used here for other purposes. */
26989 rtx_insn
*sp_adjust
= 0;
26992 /* Track and check usage of r0, r11, r12. */
26993 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
26994 #define START_USE(R) do \
26996 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26997 reg_inuse |= 1 << (R); \
26999 #define END_USE(R) do \
27001 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27002 reg_inuse &= ~(1 << (R)); \
27004 #define NOT_INUSE(R) do \
27006 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27009 #define START_USE(R) do {} while (0)
27010 #define END_USE(R) do {} while (0)
27011 #define NOT_INUSE(R) do {} while (0)
27014 if (DEFAULT_ABI
== ABI_ELFv2
27015 && !TARGET_SINGLE_PIC_BASE
)
27017 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
27019 /* With -mminimal-toc we may generate an extra use of r2 below. */
27020 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
27021 && !constant_pool_empty_p ())
27022 cfun
->machine
->r2_setup_needed
= true;
27026 if (flag_stack_usage_info
)
27027 current_function_static_stack_size
= info
->total_size
;
27029 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
27031 HOST_WIDE_INT size
= info
->total_size
;
27033 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
27035 if (size
> PROBE_INTERVAL
&& size
> get_stack_check_protect ())
27036 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27037 size
- get_stack_check_protect ());
27040 rs6000_emit_probe_stack_range (get_stack_check_protect (), size
);
27043 if (TARGET_FIX_AND_CONTINUE
)
27045 /* gdb on darwin arranges to forward a function from the old
27046 address by modifying the first 5 instructions of the function
27047 to branch to the overriding function. This is necessary to
27048 permit function pointers that point to the old function to
27049 actually forward to the new function. */
27050 emit_insn (gen_nop ());
27051 emit_insn (gen_nop ());
27052 emit_insn (gen_nop ());
27053 emit_insn (gen_nop ());
27054 emit_insn (gen_nop ());
27057 /* Handle world saves specially here. */
27058 if (WORLD_SAVE_P (info
))
27065 /* save_world expects lr in r0. */
27066 reg0
= gen_rtx_REG (Pmode
, 0);
27067 if (info
->lr_save_p
)
27069 insn
= emit_move_insn (reg0
,
27070 gen_rtx_REG (Pmode
, LR_REGNO
));
27071 RTX_FRAME_RELATED_P (insn
) = 1;
27074 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27075 assumptions about the offsets of various bits of the stack
27077 gcc_assert (info
->gp_save_offset
== -220
27078 && info
->fp_save_offset
== -144
27079 && info
->lr_save_offset
== 8
27080 && info
->cr_save_offset
== 4
27083 && (!crtl
->calls_eh_return
27084 || info
->ehrd_offset
== -432)
27085 && info
->vrsave_save_offset
== -224
27086 && info
->altivec_save_offset
== -416);
27088 treg
= gen_rtx_REG (SImode
, 11);
27089 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
27091 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27092 in R11. It also clobbers R12, so beware! */
27094 /* Preserve CR2 for save_world prologues */
27096 sz
+= 32 - info
->first_gp_reg_save
;
27097 sz
+= 64 - info
->first_fp_reg_save
;
27098 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
27099 p
= rtvec_alloc (sz
);
27101 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
27102 gen_rtx_REG (SImode
,
27104 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
27105 gen_rtx_SYMBOL_REF (Pmode
,
27107 /* We do floats first so that the instruction pattern matches
27109 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
27111 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
27113 info
->first_fp_reg_save
+ i
),
27115 info
->fp_save_offset
+ frame_off
+ 8 * i
);
27116 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
27118 = gen_frame_store (gen_rtx_REG (V4SImode
,
27119 info
->first_altivec_reg_save
+ i
),
27121 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
27122 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27124 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27126 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27128 /* CR register traditionally saved as CR2. */
27130 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
27131 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
27132 /* Explain about use of R0. */
27133 if (info
->lr_save_p
)
27135 = gen_frame_store (reg0
,
27136 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
27137 /* Explain what happens to the stack pointer. */
27139 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
27140 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
27143 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27144 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27145 treg
, GEN_INT (-info
->total_size
));
27146 sp_off
= frame_off
= info
->total_size
;
27149 strategy
= info
->savres_strategy
;
27151 /* For V.4, update stack before we do any saving and set back pointer. */
27152 if (! WORLD_SAVE_P (info
)
27154 && (DEFAULT_ABI
== ABI_V4
27155 || crtl
->calls_eh_return
))
27157 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
27158 || !(strategy
& SAVE_INLINE_GPRS
)
27159 || !(strategy
& SAVE_INLINE_VRS
));
27160 int ptr_regno
= -1;
27161 rtx ptr_reg
= NULL_RTX
;
27164 if (info
->total_size
< 32767)
27165 frame_off
= info
->total_size
;
27168 else if (info
->cr_save_p
27170 || info
->first_fp_reg_save
< 64
27171 || info
->first_gp_reg_save
< 32
27172 || info
->altivec_size
!= 0
27173 || info
->vrsave_size
!= 0
27174 || crtl
->calls_eh_return
)
27178 /* The prologue won't be saving any regs so there is no need
27179 to set up a frame register to access any frame save area.
27180 We also won't be using frame_off anywhere below, but set
27181 the correct value anyway to protect against future
27182 changes to this function. */
27183 frame_off
= info
->total_size
;
27185 if (ptr_regno
!= -1)
27187 /* Set up the frame offset to that needed by the first
27188 out-of-line save function. */
27189 START_USE (ptr_regno
);
27190 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27191 frame_reg_rtx
= ptr_reg
;
27192 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
27193 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
27194 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
27195 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
27196 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
27197 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27198 frame_off
= -ptr_off
;
27200 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27202 if (REGNO (frame_reg_rtx
) == 12)
27204 sp_off
= info
->total_size
;
27205 if (frame_reg_rtx
!= sp_reg_rtx
)
27206 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27209 /* If we use the link register, get it into r0. */
27210 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
27211 && !cfun
->machine
->lr_is_wrapped_separately
)
27213 rtx addr
, reg
, mem
;
27215 reg
= gen_rtx_REG (Pmode
, 0);
27217 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27218 RTX_FRAME_RELATED_P (insn
) = 1;
27220 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
27221 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
27223 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27224 GEN_INT (info
->lr_save_offset
+ frame_off
));
27225 mem
= gen_rtx_MEM (Pmode
, addr
);
27226 /* This should not be of rs6000_sr_alias_set, because of
27227 __builtin_return_address. */
27229 insn
= emit_move_insn (mem
, reg
);
27230 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27231 NULL_RTX
, NULL_RTX
);
27236 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27237 r12 will be needed by out-of-line gpr restore. */
27238 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27239 && !(strategy
& (SAVE_INLINE_GPRS
27240 | SAVE_NOINLINE_GPRS_SAVES_LR
))
27242 if (!WORLD_SAVE_P (info
)
27244 && REGNO (frame_reg_rtx
) != cr_save_regno
27245 && !(using_static_chain_p
&& cr_save_regno
== 11)
27246 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
27248 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
27249 START_USE (cr_save_regno
);
27250 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27253 /* Do any required saving of fpr's. If only one or two to save, do
27254 it ourselves. Otherwise, call function. */
27255 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
27257 int offset
= info
->fp_save_offset
+ frame_off
;
27258 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
27261 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
27262 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
27263 sp_off
- frame_off
);
27265 offset
+= fp_reg_size
;
27268 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
27270 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27271 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27272 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27273 rtx ptr_reg
= frame_reg_rtx
;
27275 if (REGNO (frame_reg_rtx
) == ptr_regno
)
27276 gcc_checking_assert (frame_off
== 0);
27279 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27280 NOT_INUSE (ptr_regno
);
27281 emit_insn (gen_add3_insn (ptr_reg
,
27282 frame_reg_rtx
, GEN_INT (frame_off
)));
27284 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27285 info
->fp_save_offset
,
27286 info
->lr_save_offset
,
27288 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
27289 NULL_RTX
, NULL_RTX
);
27294 /* Save GPRs. This is done as a PARALLEL if we are using
27295 the store-multiple instructions. */
27296 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
27298 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
27299 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
27300 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27301 rtx ptr_reg
= frame_reg_rtx
;
27302 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
27303 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
27306 if (ptr_regno
== 12)
27309 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27311 /* Need to adjust r11 (r12) if we saved any FPRs. */
27312 if (end_save
+ frame_off
!= 0)
27314 rtx offset
= GEN_INT (end_save
+ frame_off
);
27317 frame_off
= -end_save
;
27319 NOT_INUSE (ptr_regno
);
27320 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27322 else if (!ptr_set_up
)
27324 NOT_INUSE (ptr_regno
);
27325 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27327 ptr_off
= -end_save
;
27328 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27329 info
->gp_save_offset
+ ptr_off
,
27330 info
->lr_save_offset
+ ptr_off
,
27332 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
27333 NULL_RTX
, NULL_RTX
);
27337 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
27341 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
27342 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27344 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27346 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27347 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27348 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27349 NULL_RTX
, NULL_RTX
);
27351 else if (!WORLD_SAVE_P (info
))
27353 int offset
= info
->gp_save_offset
+ frame_off
;
27354 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
27357 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
27358 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
27359 sp_off
- frame_off
);
27361 offset
+= reg_size
;
27365 if (crtl
->calls_eh_return
)
27372 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27373 if (regno
== INVALID_REGNUM
)
27377 p
= rtvec_alloc (i
);
27381 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27382 if (regno
== INVALID_REGNUM
)
27386 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
27388 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
27389 RTVEC_ELT (p
, i
) = set
;
27390 RTX_FRAME_RELATED_P (set
) = 1;
27393 insn
= emit_insn (gen_blockage ());
27394 RTX_FRAME_RELATED_P (insn
) = 1;
27395 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
27398 /* In AIX ABI we need to make sure r2 is really saved. */
27399 if (TARGET_AIX
&& crtl
->calls_eh_return
)
27401 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
27402 rtx join_insn
, note
;
27403 rtx_insn
*save_insn
;
27404 long toc_restore_insn
;
27406 tmp_reg
= gen_rtx_REG (Pmode
, 11);
27407 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
27408 if (using_static_chain_p
)
27411 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
27415 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27416 /* Peek at instruction to which this function returns. If it's
27417 restoring r2, then we know we've already saved r2. We can't
27418 unconditionally save r2 because the value we have will already
27419 be updated if we arrived at this function via a plt call or
27420 toc adjusting stub. */
27421 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
27422 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
27423 + RS6000_TOC_SAVE_SLOT
);
27424 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
27425 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
27426 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
27427 validate_condition_mode (EQ
, CCUNSmode
);
27428 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
27429 emit_insn (gen_rtx_SET (compare_result
,
27430 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
27431 toc_save_done
= gen_label_rtx ();
27432 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
27433 gen_rtx_EQ (VOIDmode
, compare_result
,
27435 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
27437 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
27438 JUMP_LABEL (jump
) = toc_save_done
;
27439 LABEL_NUSES (toc_save_done
) += 1;
27441 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
27442 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
27443 sp_off
- frame_off
);
27445 emit_label (toc_save_done
);
27447 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27448 have a CFG that has different saves along different paths.
27449 Move the note to a dummy blockage insn, which describes that
27450 R2 is unconditionally saved after the label. */
27451 /* ??? An alternate representation might be a special insn pattern
27452 containing both the branch and the store. That might let the
27453 code that minimizes the number of DW_CFA_advance opcodes better
27454 freedom in placing the annotations. */
27455 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
27457 remove_note (save_insn
, note
);
27459 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
27460 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
27461 RTX_FRAME_RELATED_P (save_insn
) = 0;
27463 join_insn
= emit_insn (gen_blockage ());
27464 REG_NOTES (join_insn
) = note
;
27465 RTX_FRAME_RELATED_P (join_insn
) = 1;
27467 if (using_static_chain_p
)
27469 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
	{
	  cr_save_rtx = gen_rtx_REG (SImode, 0);
	  rs6000_emit_prologue_move_from_cr (cr_save_rtx);
	}

      /* Saving CR requires a two-instruction sequence: one instruction
	 to move the CR to a general-purpose register, and a second
	 instruction that stores the GPR to memory.

	 We do not emit any DWARF CFI records for the first of these,
	 because we cannot properly represent the fact that CR is saved in
	 a register.  One reason is that we cannot express that multiple
	 CR fields are saved; another reason is that on 64-bit, the size
	 of the CR register in DWARF (4 bytes) differs from the size of
	 a general-purpose register.

	 This means if any intervening instruction were to clobber one of
	 the call-saved CR fields, we'd have incorrect CFI.  To prevent
	 this from happening, we mark the store to memory as a use of
	 those CR fields, which prevents any such instruction from being
	 scheduled in between the two instructions.  */

      rtx crsave_v[9];
      int n_crsave = 0;
      int i;

      crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  crsave_v[n_crsave++]
	    = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crsave, crsave_v)));
      END_USE (REGNO (cr_save_rtx));

      /* Now, there's no way that dwarf2out_frame_debug_expr is going to
	 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
	 so we need to construct a frame expression manually.  */
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Update address to be stack-pointer relative, like
	 rs6000_frame_related would do.  */
      addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			   GEN_INT (info->cr_save_offset + sp_off));
      mem = gen_frame_mem (SImode, addr);

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* In the ELFv2 ABI we generate separate CFI records for each
	     CR field that was actually saved.  They all point to the
	     same 32-bit stack slot.  */
	  rtx crframe[8];
	  int n_crframe = 0;

	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      {
		crframe[n_crframe]
		  = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
		n_crframe++;
	      }

	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crframe, crframe)));
	}
      else
	{
	  /* In other ABIs, by convention, we use a single CR regnum to
	     represent the fact that all call-saved CR fields are saved.
	     We use CR2_REGNO to be compatible with gcc-2.95 on Linux.  */
	  rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
	}
    }
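
  /* For illustration only (not emitted from this comment, and offsets are
     an assumption): on 64-bit ELF ABIs the two-instruction CR save
     described above typically assembles to something like

	mfcr 0
	stw 0,8(1)

     with the store offset coming from info->cr_save_offset; the exact
     offset and base register depend on the ABI and the frame layout.  */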
  /* In the ELFv2 ABI we need to save all call-saved CR fields into
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;
      rtx crsave;

      /* ??? We might get better performance by using multiple mfocrf
	 instructions.  */
      crsave = gen_rtx_REG (SImode, 0);
      emit_insn (gen_prologue_movesi_from_cr (crsave));

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtvec p = rtvec_alloc (2);
	    RTVEC_ELT (p, 0)
	      = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
	    RTVEC_ELT (p, 1)
	      = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

	    insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

	    RTX_FRAME_RELATED_P (insn) = 1;
	    add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			  gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
					   sp_reg_rtx, cr_off + sp_off));

	    cr_off += reg_size;
	  }
    }
  /* If we are emitting stack probes, but allocate no stack, then
     just note that in the dump file.  */
  if (flag_stack_clash_protection
      && !info->push_p)
    dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);

  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_size != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					      ptr_reg, ptr_off);
      if (REGNO (frame_reg_rtx) == 12)
	sp_adjust = 0;
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }

  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info)
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);

      if (scratch_regno == 12)
	sp_adjust = 0;
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info)
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    HOST_WIDE_INT offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
	      {
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx,
						   GEN_INT (offset)));
		insn = emit_insn (gen_rtx_SET (mem, savereg));
		areg = NULL_RTX;
	      }
	    else
	      {
		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn (areg, GEN_INT (offset));

		/* AltiVec addressing mode is [reg+reg].  */
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

		/* Rather than emitting a generic move, force use of the stvx
		   instruction, which we always want on ISA 2.07 (power8) systems.
		   In particular we don't want xxpermdi/stxvd2x for little
		   endian.  */
		insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
	      }

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
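
  /* Illustration only (register numbers and the offset are assumptions):
     for a saved vector register the [reg+reg] store above ends up as
     something like

	li 0,OFFSET
	stvx 20,1,0	; v20 stored at r1 + r0

     where OFFSET stands for the computed altivec_save_offset and the base
     register matches whatever frame register is in use at this point.  */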
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */
  if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
    {
      /* Get VRSAVE into a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      int save_regno = 12;
      if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && !using_static_chain_p)
	save_regno = 11;
      else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}
      NOT_INUSE (save_regno);

      emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
    }
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC
	   && !constant_pool_empty_p ())
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI == ABI_V4
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behavior is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }

  /* Set up the arg pointer (r12) for -fsplit-stack code.  */
  if (using_split_stack && split_stack_arg_pointer_used_p ())
    emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
}
/* Output .extern statements for the save/restore routines we use.  */

static void
rs6000_output_savres_externs (FILE *file)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file)
{
  if (!cfun->is_thunk)
    rs6000_output_savres_externs (file);

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (rs6000_global_entry_point_needed_p ())
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);

      if (TARGET_CMODEL != CMODEL_LARGE)
	{
	  /* In the small and medium code models, we assume the TOC is less
	     than 2 GB away from the text section, so it can be computed via
	     the following two-instruction sequence.  */
	  char buf[256];

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  fprintf (file, "0:\taddis 2,12,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@ha\n");
	  fprintf (file, "\taddi 2,2,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@l\n");
	}
      else
	{
	  /* In the large code model, we allow arbitrary offsets between the
	     TOC and the text section, so we have to load the offset from
	     memory.  The data field is emitted directly before the global
	     entry point in rs6000_elf_declare_function_name.  */
	  char buf[256];

#ifdef HAVE_AS_ENTRY_MARKERS
	  /* If supported by the linker, emit a marker relocation.  If the
	     total code size of the final executable or shared library
	     happens to fit into 2 GB after all, the linker will replace
	     this code sequence with the sequence for the small or medium
	     code model.  */
	  fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
#endif
	  fprintf (file, "\tld 2,");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "-");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "(12)\n");
	  fprintf (file, "\tadd 2,2,12\n");
	}

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }
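
  /* Schematically, the small/medium code model text emitted above for a
     function `foo' looks like the following (label names are illustrative;
     the real internal label is .LCF<N>):

     .LCF0:
     0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
	.localentry	foo,.-foo
  */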
  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
	  && cfun->static_chain_decl != NULL)
	{
	  asm_fprintf (file, "\tstd %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	  fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	  asm_fprintf (file, "\tld %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	}
      else
	fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}

/* -mprofile-kernel code calls mcount before the function prologue,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}
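
/* Sketch of the -mprofile-kernel instrumentation emitted above for a
   nested function on a non-ELFv2 64-bit ABI (names and offsets here are
   illustrative; RS6000_MCOUNT is "_mcount" on typical Linux
   configurations and the static chain lives in r11):

	mflr 0
	std 11,24(1)
	bl _mcount
	ld 11,24(1)
*/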
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	 CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	{
	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	{
	  rtx insn = emit_insn (gen_movsi_to_cr_one
				(gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	  /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	     CR field separately, attached to the insn that in fact
	     restores this particular CR field.  */
	  if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	    {
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
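
/* Illustration only, not an authoritative statement of the generated code:
   when several CR fields were saved and a multiple move is profitable,
   the PARALLEL built above assembles to a single mtcrf whose mask has one
   bit per restored field, e.g.

	mtcrf 0x38,12	; restore cr2, cr3, cr4 from r12

   otherwise one mtcrf (or mtocrf) per field is emitted.  */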
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}

/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  strategy = info->savres_strategy;
  using_load_multiple = strategy & REST_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || using_load_multiple);

  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment;  We don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
						     ? info->lr_save_offset
						     : 0) > 32767
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64)
		&& !cfun->machine->lr_is_wrapped_separately);
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (9
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_gp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_altivec_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
28338 frame_off
= info
->total_size
;
28340 /* Restore AltiVec registers if we must do so before adjusting the
28342 if (info
->altivec_size
!= 0
28343 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28344 || (DEFAULT_ABI
!= ABI_V4
28345 && offset_below_red_zone_p (info
->altivec_save_offset
))))
28348 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28350 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
28351 if (use_backchain_to_restore_sp
)
28353 int frame_regno
= 11;
28355 if ((strategy
& REST_INLINE_VRS
) == 0)
28357 /* Of r11 and r12, select the one not clobbered by an
28358 out-of-line restore function for the frame register. */
28359 frame_regno
= 11 + 12 - scratch_regno
;
28361 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
28362 emit_move_insn (frame_reg_rtx
,
28363 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28366 else if (frame_pointer_needed
)
28367 frame_reg_rtx
= hard_frame_pointer_rtx
;
28369 if ((strategy
& REST_INLINE_VRS
) == 0)
28371 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28373 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28374 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28376 if (end_save
+ frame_off
!= 0)
28378 rtx offset
= GEN_INT (end_save
+ frame_off
);
28380 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28383 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28385 ptr_off
= -end_save
;
28386 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28387 info
->altivec_save_offset
+ ptr_off
,
28388 0, V4SImode
, SAVRES_VR
);
28392 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28393 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28395 rtx addr
, areg
, mem
, insn
;
28396 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28397 HOST_WIDE_INT offset
28398 = (info
->altivec_save_offset
+ frame_off
28399 + 16 * (i
- info
->first_altivec_reg_save
));
28401 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28403 mem
= gen_frame_mem (V4SImode
,
28404 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28405 GEN_INT (offset
)));
28406 insn
= gen_rtx_SET (reg
, mem
);
28410 areg
= gen_rtx_REG (Pmode
, 0);
28411 emit_move_insn (areg
, GEN_INT (offset
));
28413 /* AltiVec addressing mode is [reg+reg]. */
28414 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28415 mem
= gen_frame_mem (V4SImode
, addr
);
28417 /* Rather than emitting a generic move, force use of the
28418 lvx instruction, which we always want. In particular we
28419 don't want lxvd2x/xxpermdi for little endian. */
28420 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28423 (void) emit_insn (insn
);
28427 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28428 if (((strategy
& REST_INLINE_VRS
) == 0
28429 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28430 && (flag_shrink_wrap
28431 || (offset_below_red_zone_p
28432 (info
->altivec_save_offset
28433 + 16 * (i
- info
->first_altivec_reg_save
))))
28436 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28437 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28441 /* Restore VRSAVE if we must do so before adjusting the stack. */
28442 if (info
->vrsave_size
!= 0
28443 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28444 || (DEFAULT_ABI
!= ABI_V4
28445 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
28449 if (frame_reg_rtx
== sp_reg_rtx
)
28451 if (use_backchain_to_restore_sp
)
28453 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28454 emit_move_insn (frame_reg_rtx
,
28455 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28458 else if (frame_pointer_needed
)
28459 frame_reg_rtx
= hard_frame_pointer_rtx
;
28462 reg
= gen_rtx_REG (SImode
, 12);
28463 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28464 info
->vrsave_save_offset
+ frame_off
));
28466 emit_insn (generate_set_vrsave (reg
, info
, 1));
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
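
  /* Illustration only: with a large frame the "restore via the backchain"
     path above typically becomes, on 64-bit,

	ld 11,0(1)	; load the caller's SP from the backchain word
	...		; register restores addressed from r11
	mr 1,11		; pop the frame

     with lwz instead of ld on 32-bit targets; register choices here are
     an example, not a guarantee.  */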
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->fp_save_offset + info->fp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  if (TARGET_32BIT)
		    emit_insn (gen_addsi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		  else
		    emit_insn (gen_adddi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, insn;
		rtx reg = gen_rtx_REG (V4SImode, i);
		HOST_WIDE_INT offset
		  = (info->altivec_save_offset + frame_off
		     + 16 * (i - info->first_altivec_reg_save));

		if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
		  {
		    mem = gen_frame_mem (V4SImode,
					 gen_rtx_PLUS (Pmode, frame_reg_rtx,
						       GEN_INT (offset)));
		    insn = gen_rtx_SET (reg, mem);
		  }
		else
		  {
		    areg = gen_rtx_REG (Pmode, 0);
		    emit_move_insn (areg, GEN_INT (offset));

		    /* AltiVec addressing mode is [reg+reg].  */
		    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		    mem = gen_frame_mem (V4SImode, addr);

		    /* Rather than emitting a generic move, force use of the
		       lvx instruction, which we always want.  In particular we
		       don't want lxvd2x/xxpermdi for little endian.  */
		    insn = gen_altivec_lvx_v4si_internal (reg, mem);
		  }

		(void) emit_insn (insn);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    && save_reg_p (i))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->vrsave_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* In the ELFv2 ABI we need to restore all call-saved CR fields from
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtx reg = gen_rtx_REG (SImode, 0);
	    emit_insn (gen_frame_load (reg, frame_reg_rtx,
				       cr_off + frame_off));

	    insn = emit_insn (gen_movsi_to_cr_one
			      (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	    if (!exit_func && flag_shrink_wrap)
	      {
		add_reg_note (insn, REG_CFA_RESTORE,
			      gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (insn) = 1;
	      }

	    cr_off += reg_size;
	  }
    }
  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);
  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + RS6000_TOC_SAVE_SLOT));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      int offset = info->gp_save_offset + frame_off;
      for (i = info->first_gp_reg_save; i < 32; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->gpr_is_wrapped_separately[i])
	    {
	      rtx reg = gen_rtx_REG (reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	    }

	  offset += reg_size;
	}
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK;  A duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i)
	    && !cfun->machine->gpr_is_wrapped_separately[i])
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }
  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    {
      int offset = info->fp_save_offset + frame_off;
      for (i = info->first_fp_reg_save; i < 64; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->fpr_is_wrapped_separately[i - 32])
	    {
	      rtx reg = gen_rtx_REG (fp_reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	      if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }

	  offset += fp_reg_size;
	}
    }
  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  /* We can't hang the cfa_restores off a simple return,
	     since the shrink-wrap code sometimes uses an existing
	     return.  This means there might be a path from
	     pre-prologue code to this return, and dwarf2cfi code
	     wants the eh_frame unwinder state to be the same on
	     all paths to any point.  So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi with shrink-wrap
	     requirement;  The stack red-zone means eh_frame info
	     from the prologue telling the unwinder to restore
	     from the stack is perfectly good right to the end of
	     the function.  */
	  emit_insn (gen_blockage ());
	  emit_cfa_restores (cfa_restores);
	  cfa_restores = NULL_RTX;
	}
      emit_jump_insn (targetm.gen_simple_return ());
    }

  if (!sibcall && !restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
      int elt = 0;
      RTVEC_ELT (p, elt++) = ret_rtx;
      if (lr)
	RTVEC_ELT (p, elt++)
	  = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

      /* We have to restore more than two FP registers, so branch to the
	 restore function.  It will return to our caller.  */
      int i;
      int reg;
      rtx sym;

      if (flag_shrink_wrap)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
      reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));

      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	  RTVEC_ELT (p, elt++)
	    = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file)
{
#if TARGET_MACHO
  macho_branch_islands ();

  {
    rtx_insn *insn = get_last_insn ();
    rtx_insn *deleted_debug_label = NULL;

    /* Mach-O doesn't support labels at the end of objects, so if
       it looks like we might want one, take special action.

       First, collect any sequence of deleted debug labels.  */
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }

    /* Second, if we have:
       label:
	 barrier
       then this needs to be detected, so skip past the barrier.  */

    if (insn && BARRIER_P (insn))
      insn = PREV_INSN (insn);

    /* Up to now we've only seen notes or barriers.  */
    if (insn)
      {
	if (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
	  /* Trailing label: <barrier>.  */
	  fputs ("\tnop\n", file);
	else
	  {
	    /* Lastly, see if we have a completely empty function body.  */
	    while (insn && ! INSN_P (insn))
	      insn = PREV_INSN (insn);
	    /* If we don't find any insns, we've got an empty function body;
	       I.e. completely empty - without a return or branch.  This is
	       taken as the case where a function body has been removed
	       because it contains an inline __builtin_unreachable().  GCC
	       states that reaching __builtin_unreachable() means UB so we're
	       not obliged to do anything special; however, we want
	       non-zero-sized function bodies.  To meet this, and help the
	       user out, let's trap the case.  */
	    if (insn == NULL)
	      fputs ("\ttrap\n", file);
	  }
      }
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyways, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO, Go and JIT aren't assigned numbers
	 either, so for now use 0.  */
      if (lang_GNU_C ()
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go")
	  || ! strcmp (language_string, "libgccjit"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || lang_GNU_Fortran ())
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (lang_GNU_CXX ()
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      /* Classify the parameter as single or double float;
			 anything else is unexpected here.  */
		      if (mode == SFmode)
			bits = 0x2;
		      else if (mode == DFmode || mode == TFmode)
			bits = 0x3;
		      else
			gcc_unreachable ();

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 all on stack.  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (optional_tbtab)
	{
	  /* Optional fields follow.  Some are variable length.  */

	  /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
	     float, 11 double float.  */
	  /* There is an entry for each parameter in a register, in the order
	     that they occur in the parameter list.  Any intervening arguments
	     on the stack are ignored.  If the list overflows a long (max
	     possible length 34 bits) then completely leave off all elements
	     that don't fit.  */
	  /* Only emit this long if there was at least one parameter.  */
	  if (fixed_parms || float_parms)
	    fprintf (file, "\t.long %d\n", parm_info);

	  /* Offset from start of code to tb table.  */
	  fputs ("\t.long ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  RS6000_OUTPUT_BASENAME (file, fname);
	  fputs ("-", file);
	  rs6000_output_function_entry (file, fname);
	  fputs ("\n", file);

	  /* Interrupt handler mask.  */
	  /* Omit this long, since we never set the interrupt handler bit
	     above.  */

	  /* Number of CTL (controlled storage) anchors.  */
	  /* Omit this long, since the has_ctl bit is never set above.  */

	  /* Displacement into stack of each CTL anchor.  */
	  /* Omit this list of longs, because there are no CTL anchors.  */

	  /* Length of function name.  */
	  if (*fname == '*')
	    ++fname;
	  fprintf (file, "\t.short %d\n", (int) strlen (fname));

	  /* Function name.  */
	  assemble_string (fname, strlen (fname));

	  /* Register for alloca automatic storage; this is always reg 31.
	     Only emit this if the alloca bit was set above.  */
	  if (frame_pointer_needed)
	    fputs ("\t.byte 31\n", file);

	  fputs ("\t.align 2\n", file);
	}
    }
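
  /* For reference only (the values below are made-up examples, not what
     any particular function emits): the mandatory portion produced above
     is a single all-zero word followed by one .byte directive with eight
     comma-separated fields, e.g.

	.long 0
	.byte 0,0,32,65,128,1,0,1

     i.e. version, language, the three bit-flag bytes, the gpr count byte,
     the fixed-parameter count, and the float-parameter/stack byte, then
     the optional fields when optional_tbtab is set.  */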
  /* Arrange to define .LCTOC1 label, if not already done.  */
  if (need_toc_init)
    {
      need_toc_init = 0;
      if (!toc_initialized)
	{
	  switch_to_section (toc_section);
	  switch_to_section (current_function_section ());
	}
    }
}

/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
/* Emit -fsplit-stack prologue, which goes before the regular function
   prologue (at local entry point in the case of ELFv2).  */

void
rs6000_expand_split_stack_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  unsigned HOST_WIDE_INT allocate;
  long alloc_hi, alloc_lo;
  rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
  rtx_insn *insn;

  gcc_assert (flag_split_stack && reload_completed);

  if (!info->push_p)
    return;

  if (global_regs[29])
    {
      error ("%qs uses register r29", "-fsplit-stack");
      inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
	      "conflicts with %qD", global_regs_decl[29]);
    }

  allocate = info->total_size;
  if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
    {
      sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
      return;
    }
  if (morestack_ref == NULL_RTX)
    {
      morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
      SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
					   | SYMBOL_FLAG_FUNCTION);
    }

  r0 = gen_rtx_REG (Pmode, 0);
  r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  r12 = gen_rtx_REG (Pmode, 12);
  emit_insn (gen_load_split_stack_limit (r0));
  /* Always emit two insns here to calculate the requested stack,
     so that the linker can edit them when adjusting size for calling
     non-split-stack code.  */
  alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  alloc_lo = -allocate - alloc_hi;
  if (alloc_hi != 0)
    {
      emit_insn (gen_add3_const (r12, r1, alloc_hi));
      if (alloc_lo != 0)
	emit_insn (gen_add3_const (r12, r12, alloc_lo));
      else
	emit_insn (gen_nop ());
    }
  else
    {
      emit_insn (gen_add3_const (r12, r1, alloc_lo));
      emit_insn (gen_nop ());
    }

  compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
  emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
  ok_label = gen_label_rtx ();
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, compare, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, ok_label),
			       pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (insn) = ok_label;
  /* Mark the jump as very likely to be taken.  */
  add_reg_br_prob_note (insn, profile_probability::very_likely ());

  lr = gen_rtx_REG (Pmode, LR_REGNO);
  insn = emit_move_insn (r0, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
  RTX_FRAME_RELATED_P (insn) = 1;

  insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
				   const0_rtx, const0_rtx));
  call_fusage = NULL_RTX;
  use_reg (&call_fusage, r12);
  /* Say the call uses r0, even though it doesn't, to stop regrename
     from twiddling with the insns saving lr, trashing args for cfun.
     The insns restoring lr are similarly protected by making
     split_stack_return use r0.  */
  use_reg (&call_fusage, r0);
  add_function_usage_to (insn, call_fusage);
  /* Indicate that this function can't jump to non-local gotos.  */
  make_reg_eh_region_note_nothrow_nononlocal (insn);
  emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
  insn = emit_move_insn (lr, r0);
  add_reg_note (insn, REG_CFA_RESTORE, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  emit_insn (gen_split_stack_return ());

  emit_label (ok_label);
  LABEL_NUSES (ok_label) = 1;
}
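/* An illustrative sketch (not verbatim compiler output): for a 64-bit
   frame of size N, the linker-editable allocation pair above becomes

	addis r12,r1,-N@ha
	addi  r12,r12,-N@l

   followed by a cmpld of r12 against the stack limit in r0 and a
   very-likely branch to ok_label; the slow path saves LR, calls
   __morestack, and returns via split_stack_return.  */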
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}
      return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			    FIRST_PARM_OFFSET (current_function_decl));
    }
  return virtual_incoming_args_rtx;
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ front end will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (3,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
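/* Illustrative output of the thunk above for DELTA == 16 and
   VCALL_OFFSET == 0 (hypothetical symbol name):

	addi 3,3,16		# step the incoming `this' (r3) by DELTA
	b .called_function	# direct tail jump; LR is left untouched

   A nonzero VCALL_OFFSET additionally loads the vtable pointer and the
   slot at VCALL_OFFSET through r12 before the final add.  */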
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

	Target	    Flags		Name		One table per
	AIX	    (none)		AIX TOC		object file
	AIX	    -mfull-toc		AIX TOC		object file
	AIX	    -mminimal-toc	AIX minimal TOC	translation unit
	SVR4/EABI   (none)		SVR4 SDATA	object file
	SVR4/EABI   -fpic		SVR4 pic	object file
	SVR4/EABI   -fPIC		SVR4 PIC	translation unit
	SVR4/EABI   -mrelocatable	EABI TOC	function
	SVR4/EABI   -maix		AIX TOC		object file
	SVR4/EABI   -maix -mminimal-toc
				 	AIX minimal TOC	translation unit

	Name		   Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

	AIX TOC		   2	crt0	as	Y	option	option
	AIX minimal TOC	   30	prolog	gcc	Y	Y	option
	SVR4 SDATA	   13	crt0	gcc	N	Y	N
	SVR4 pic	   30	prolog	ld	Y	not yet	N
	SVR4 PIC	   30	prolog	gcc	Y	option	option
	EABI TOC	   30	prolog	gcc	Y	option	option  */
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_WIDE_INT:
      {
	int i;
	flen = CONST_WIDE_INT_NUNITS (k);
	for (i = 0; i < flen; i++)
	  result = result * 613 + CONST_WIDE_INT_ELT (k, i);
	return result;
      }

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}

hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}
/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}

/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
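/* For example, under the Itanium C++ ABI mangling GCC uses, a class
   Foo has vtable _ZTV3Foo, VTT _ZTT3Foo, typeinfo _ZTI3Foo, and
   construction vtables prefixed _ZTC, so all of them satisfy
   VTABLE_NAME_P.  */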
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
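/* For example: rs6000_xcoff_strip_dollar ("foo$bar") returns
   "foo_bar", while a name without '$', or one whose first character
   is '$', is returned unchanged.  */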
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  const char *name = XSTR (x, 0);

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the identifier and not the
     symbol.  */
  if (VTABLE_NAME_P (name))
    RS6000_OUTPUT_BASENAME (file, name);
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     LABEL_REFs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);

      h = ggc_alloc<toc_hash_struct> ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*found)->labelno));

#ifdef HAVE_AS_TLS
	  if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
	      && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
		  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
	    {
	      fputs ("\t.set ", file);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d,", labelno);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d\n", ((*found)->labelno));
	    }
#endif
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    ASM_OUTPUT_ALIGN (file, 3);

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
	  || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
    {
      long k[4];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE
	   && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      long k[2];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE
	   && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      long l;

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  if (WORDS_BIG_ENDIAN)
	    fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  else
	    fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      low = INTVAL (x) & 0xffffffff;
      high = (HOST_WIDE_INT) INTVAL (x) >> 32;

      /* TOC entries are always Pmode-sized, so when big-endian
	 smaller integer constants in the TOC need to be padded.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_ELF || TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      /* Mark large TOC symbols on AIX with [TE] so they are mapped
	 after other TOC symbols, reducing overflow of small TOC access
	 to [TC] symbols.  */
      fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
	     ? "[TE]," : "[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);

#if HAVE_AS_TLS
  if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
    {
      switch (SYMBOL_REF_TLS_MODEL (base))
	{
	case 0:
	  break;
	case TLS_MODEL_LOCAL_EXEC:
	  fputs ("@le", file);
	  break;
	case TLS_MODEL_INITIAL_EXEC:
	  fputs ("@ie", file);
	  break;
	/* Use global-dynamic for local-dynamic.  */
	case TLS_MODEL_GLOBAL_DYNAMIC:
	case TLS_MODEL_LOCAL_DYNAMIC:
	  putc ('\n', file);
	  (*targetm.asm_out.internal_label) (file, "LCM", labelno);
	  fputs ("\t.tc .", file);
	  RS6000_OUTPUT_BASENAME (file, name);
	  fputs ("[TC],", file);
	  output_addr_const (file, x);
	  fputs ("@m", file);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
#endif

  putc ('\n', file);
}
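/* An illustrative entry (not verbatim from any build): on 64-bit AIX
   with a full TOC, the DFmode constant 1.0 is emitted as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   while under -mminimal-toc only the value itself is emitted after
   the .LC label, via DOUBLE_INT_ASM_OP.  */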
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
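/* For example, output_ascii (file, "Hi\n", 3) produces

	.byte "Hi"
	.byte 10

   printable characters are batched into quoted runs and everything
   else falls back to decimal .byte values.  */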
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}
      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\taddis %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\tlwz %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\tlwz %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\tadd %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\tlis %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      /* Don't do anything, done in output_profile_hook ().  */
      break;
    }
}
/* The following variable value is the last issued insn.  */

static rtx_insn *last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* The following variable helps pair divide insns during scheduling.  */
static int divide_cnt;
/* The following variable helps pair and alternate vector and vector load
   insns during scheduling.  */
static int vec_pairing;


/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */
static int
rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		    unsigned int)
{
  enum attr_type attr_type;

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  switch (dep_type)
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_cpu_attr == CPU_PPC603
		 || rs6000_cpu_attr == CPU_PPC604
		 || rs6000_cpu_attr == CPU_PPC604E
		 || rs6000_cpu_attr == CPU_PPC620
		 || rs6000_cpu_attr == CPU_PPC630
		 || rs6000_cpu_attr == CPU_PPC750
		 || rs6000_cpu_attr == CPU_PPC7400
		 || rs6000_cpu_attr == CPU_PPC7450
		 || rs6000_cpu_attr == CPU_PPCE5500
		 || rs6000_cpu_attr == CPU_PPCE6500
		 || rs6000_cpu_attr == CPU_POWER4
		 || rs6000_cpu_attr == CPU_POWER5
		 || rs6000_cpu_attr == CPU_POWER7
		 || rs6000_cpu_attr == CPU_POWER8
		 || rs6000_cpu_attr == CPU_POWER9
		 || rs6000_cpu_attr == CPU_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		case TYPE_EXTS:
		case TYPE_MUL:
		  if (get_attr_dot (dep_insn) == DOT_YES)
		    return cost + 2;
		  else
		    break;
		case TYPE_SHIFT:
		  if (get_attr_dot (dep_insn) == DOT_YES
		      && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
		    return cost + 2;
		  else
		    break;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_FPSTORE:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_CNTLZ:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_sign_extend (dep_insn)
			       == SIGN_EXTEND_YES ? 6 : 4;
		      break;
		    }
		  case TYPE_SHIFT:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			       6 : 3;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    {
		      if (get_attr_update (dep_insn) == UPDATE_YES
			  && ! rs6000_store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_MUL:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_DIV:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x) */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_CNTLZ:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_sign_extend (dep_insn)
			       == SIGN_EXTEND_YES ? 6 : 4;
		      break;
		    }
		  case TYPE_SHIFT:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			       6 : 3;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    {
		      if (get_attr_update (dep_insn) == UPDATE_YES
			  && set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_MUL:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_DIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& get_attr_update (insn) == UPDATE_NO
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	    case TYPE_FPSIMPLE:
	      if (get_attr_type (dep_insn) == TYPE_FP
		  || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
		return 3;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_update (insn) == UPDATE_NO
		  && get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
      /* Fall through, no cost for output dependency.  */
      /* FALLTHRU */

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
			  int cost, unsigned int dw)
{
  int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);

  if (ret != cost)
    {
      const char *dep;

      switch (dep_type)
	{
	default:	     dep = "unknown dependency"; break;
	case REG_DEP_TRUE:   dep = "data dependency";	 break;
	case REG_DEP_OUTPUT: dep = "output dependency";	 break;
	case REG_DEP_ANTI:   dep = "anti dependency";	 break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_update (insn) == UPDATE_YES
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
	  || ((type == TYPE_LOAD || type == TYPE_STORE)
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_YES)
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	   && get_attr_update (insn) == UPDATE_NO)
	  || (type == TYPE_LOAD
	      && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || (type == TYPE_STORE
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
	      && get_attr_update (insn) == UPDATE_YES)
	  || type == TYPE_DELAYED_CR
	  || (type == TYPE_EXTS
	      && get_attr_dot (insn) == DOT_YES)
	  || (type == TYPE_SHIFT
	      && get_attr_dot (insn) == DOT_YES
	      && get_attr_var_shift (insn) == VAR_SHIFT_NO)
	  || (type == TYPE_MUL
	      && get_attr_dot (insn) == DOT_YES)
	  || type == TYPE_DIV
	  || (type == TYPE_INSERT
	      && get_attr_size (insn) == SIZE_32))
	return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;

  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = (XEXP (mem, 0));
  if (GET_CODE (addr_rtx) == PRE_MODIFY)
    addr_rtx = XEXP (addr_rtx, 1);

  *offset = 0;
  while (GET_CODE (addr_rtx) == PLUS
	 && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *offset += INTVAL (XEXP (addr_rtx, 1));
      addr_rtx = XEXP (addr_rtx, 0);
    }
  if (!REG_P (addr_rtx))
    return false;

  *base = addr_rtx;
  return true;
}
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
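/* For example, an 8-byte store to 8(r9) and another to 16(r9) are
   adjacent (off1 + size1 == off2); 8-byte stores to 8(r9) and 12(r9)
   are not adjacent; those overlap instead (see mem_locations_overlap
   below).  */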
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_MUL:
      case TYPE_DIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {

      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx_insn *insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_MUL
      || type == TYPE_DIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
31123 rs6000_issue_rate (void)
31125 /* Unless scheduling for register pressure, use issue rate of 1 for
31126 first scheduling pass to decrease degradation. */
31127 if (!reload_completed
&& !flag_sched_pressure
)
31130 switch (rs6000_cpu_attr
) {
31132 case CPU_PPC601
: /* ? */
31142 case CPU_PPCE300C2
:
31143 case CPU_PPCE300C3
:
31144 case CPU_PPCE500MC
:
31145 case CPU_PPCE500MC64
:
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_PPC8540:
    case CPU_PPC8548:
      return 4;

    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
31193 rs6000_use_sched_lookahead_guard (rtx_insn
*insn
, int ready_index
)
31195 if (ready_index
== 0)
31198 if (rs6000_cpu_attr
!= CPU_CELL
)
31201 gcc_assert (insn
!= NULL_RTX
&& INSN_P (insn
));
31203 if (!reload_completed
31204 || is_nonpipeline_insn (insn
)
31205 || is_microcoded_insn (insn
))
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (CALL_P (insn))
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}

/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Return whether TYPE is a Power9 pairable vector instruction type.  */

static bool
is_power9_pairable_vec_type (enum attr_type type)
{
  switch (type)
    {
    case TYPE_VECSIMPLE:
    case TYPE_VECCOMPLEX:
    case TYPE_VECDIV:
    case TYPE_VECCMP:
    case TYPE_VECPERM:
    case TYPE_VECFLOAT:
    case TYPE_VECFDIV:
    case TYPE_VECDOUBLE:
      return true;
    default:
      break;
    }
  return false;
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx_insn *
get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL;

      if (CALL_P (insn)
	  || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
31423 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31428 enum attr_type type
, type2
;
31430 type
= get_attr_type (last_scheduled_insn
);
31432 /* Try to issue fixed point divides back-to-back in pairs so they will be
31433 routed to separate execution units and execute in parallel. */
31434 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31436 /* First divide has been scheduled. */
31439 /* Scan the ready list looking for another divide, if found move it
31440 to the end of the list so it is chosen next. */
31444 if (recog_memoized (ready
[pos
]) >= 0
31445 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31448 for (i
= pos
; i
< lastpos
; i
++)
31449 ready
[i
] = ready
[i
+ 1];
31450 ready
[lastpos
] = tmp
;
31458 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31461 /* The best dispatch throughput for vector and vector load insns can be
31462 achieved by interleaving a vector and vector load such that they'll
31463 dispatch to the same superslice. If this pairing cannot be achieved
31464 then it is best to pair vector insns together and vector load insns
31467 To aid in this pairing, vec_pairing maintains the current state with
31468 the following values:
31470 0 : Initial state, no vecload/vector pairing has been started.
31472 1 : A vecload or vector insn has been issued and a candidate for
31473 pairing has been found and moved to the end of the ready
31475 if (type
== TYPE_VECLOAD
)
31477 /* Issued a vecload. */
31478 if (vec_pairing
== 0)
31480 int vecload_pos
= -1;
31481 /* We issued a single vecload, look for a vector insn to pair it
31482 with. If one isn't found, try to pair another vecload. */
31486 if (recog_memoized (ready
[pos
]) >= 0)
31488 type2
= get_attr_type (ready
[pos
]);
31489 if (is_power9_pairable_vec_type (type2
))
31491 /* Found a vector insn to pair with, move it to the
31492 end of the ready list so it is scheduled next. */
31494 for (i
= pos
; i
< lastpos
; i
++)
31495 ready
[i
] = ready
[i
+ 1];
31496 ready
[lastpos
] = tmp
;
31498 return cached_can_issue_more
;
31500 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31501 /* Remember position of first vecload seen. */
31506 if (vecload_pos
>= 0)
31508 /* Didn't find a vector to pair with but did find a vecload,
31509 move it to the end of the ready list. */
31510 tmp
= ready
[vecload_pos
];
31511 for (i
= vecload_pos
; i
< lastpos
; i
++)
31512 ready
[i
] = ready
[i
+ 1];
31513 ready
[lastpos
] = tmp
;
31515 return cached_can_issue_more
;
31519 else if (is_power9_pairable_vec_type (type
))
31521 /* Issued a vector operation. */
31522 if (vec_pairing
== 0)
31525 /* We issued a single vector insn, look for a vecload to pair it
31526 with. If one isn't found, try to pair another vector. */
31530 if (recog_memoized (ready
[pos
]) >= 0)
31532 type2
= get_attr_type (ready
[pos
]);
31533 if (type2
== TYPE_VECLOAD
)
31535 /* Found a vecload insn to pair with, move it to the
31536 end of the ready list so it is scheduled next. */
31538 for (i
= pos
; i
< lastpos
; i
++)
31539 ready
[i
] = ready
[i
+ 1];
31540 ready
[lastpos
] = tmp
;
31542 return cached_can_issue_more
;
31544 else if (is_power9_pairable_vec_type (type2
)
31546 /* Remember position of first vector insn seen. */
31553 /* Didn't find a vecload to pair with but did find a vector
31554 insn, move it to the end of the ready list. */
31555 tmp
= ready
[vec_pos
];
31556 for (i
= vec_pos
; i
< lastpos
; i
++)
31557 ready
[i
] = ready
[i
+ 1];
31558 ready
[lastpos
] = tmp
;
31560 return cached_can_issue_more
;
31565 /* We've either finished a vec/vecload pair, couldn't find an insn to
31566 continue the current pair, or the last insn had nothing to do with
31567 with pairing. In any case, reset the state. */
31571 return cached_can_issue_more
;
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx_insn **ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a non-pipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
	  && (recog_memoized (ready[n_ready - 2]) > 0))
	/* Simply swap first two insns.  */
	std::swap (ready[n_ready - 1], ready[n_ready - 2]);
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
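/* An illustrative pendulum trace for the Power6 logic in
   rs6000_sched_reorder2 below: issuing store, store in one cycle
   drives load_store_pendulum to -2, so the first load on the ready
   list has its priority raised; issuing load, load drives it to +2
   and the first store is favored instead, which keeps loads and
   stores alternating.  */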
31603 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31606 rs6000_sched_reorder2 (FILE *dump
, int sched_verbose
, rtx_insn
**ready
,
31607 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
)
31610 fprintf (dump
, "// rs6000_sched_reorder2 :\n");
31612 /* For Power6, we need to handle some special cases to try and keep the
31613 store queue from overflowing and triggering expensive flushes.
31615 This code monitors how load and store instructions are being issued
31616 and skews the ready list one way or the other to increase the likelihood
31617 that a desired instruction is issued at the proper time.
31619 A couple of things are done. First, we maintain a "load_store_pendulum"
31620 to track the current state of load/store issue.
31622 - If the pendulum is at zero, then no loads or stores have been
31623 issued in the current cycle so we do nothing.
31625 - If the pendulum is 1, then a single load has been issued in this
31626 cycle and we attempt to locate another load in the ready list to
31629 - If the pendulum is -2, then two stores have already been
31630 issued in this cycle, so we increase the priority of the first load
31631 in the ready list to increase it's likelihood of being chosen first
31634 - If the pendulum is -1, then a single store has been issued in this
31635 cycle and we attempt to locate another store in the ready list to
31636 issue with it, preferring a store to an adjacent memory location to
31637 facilitate store pairing in the store queue.
31639 - If the pendulum is 2, then two loads have already been
31640 issued in this cycle, so we increase the priority of the first store
31641 in the ready list to increase it's likelihood of being chosen first
31644 - If the pendulum < -2 or > 2, then do nothing.
31646 Note: This code covers the most common scenarios. There exist non
31647 load/store instructions which make use of the LSU and which
31648 would need to be accounted for to strictly model the behavior
31649 of the machine. Those instructions are currently unaccounted
31650 for to help minimize compile time overhead of this code.
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx_insn *tmp;
      rtx load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;
      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an
             adjacent memory location to facilitate store pairing in the
             store queue.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;

                  /* Maintain the index of the first store found on the
                     ready list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;
                      break;
                    }
                }
              pos--;
            }
          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store
                 was, so move the non-adjacent store to the front of the
                 ready list, and adjust its priority so that it is more
                 likely to stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the
             priority of the first store in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;
                  break;
                }
              pos--;
            }
        }
    }

  /* Do Power9 dependent reordering if necessary.  */
  if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
      && recog_memoized (last_scheduled_insn) >= 0)
    return power9_sched_reorder2 (ready, *pn_ready - 1);

  return cached_can_issue_more;
}
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group
   that precedes the group to which INSN belongs).  This means that INSN
   will be the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || NOTE_P (insn)
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;
  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
      /* FALLTHRU */
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_FPCOMPARE:
          return true;
        case TYPE_SHIFT:
          if (get_attr_dot (insn) == DOT_NO
              || get_attr_var_shift (insn) == VAR_SHIFT_NO)
            return true;
          break;
        case TYPE_DIV:
          if (get_attr_size (insn) == SIZE_32)
            return true;
          break;
        case TYPE_LOAD:
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
          return true;
        case TYPE_MUL:
        case TYPE_SHIFT:
        case TYPE_EXTS:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_DELAYED_CR:
        case TYPE_VECSTORE:
          return true;
        case TYPE_SHIFT:
        case TYPE_EXTS:
        case TYPE_MUL:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        case TYPE_STORE:
          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;
          break;
        default:
          break;
        }
      break;

    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || NOTE_P (insn)
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_FPCOMPARE:
        return true;
      case TYPE_SHIFT:
        if (get_attr_dot (insn) == DOT_NO
            || get_attr_var_shift (insn) == VAR_SHIFT_NO)
          return true;
        break;
      case TYPE_DIV:
        if (get_attr_size (insn) == SIZE_32)
          return true;
        break;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD:
        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;
        break;
      case TYPE_STORE:
        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;
        break;
      default:
        break;
      }
    break;

  case PROCESSOR_POWER8:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD:
        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;
        break;
      case TYPE_STORE:
        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;
        break;
      default:
        break;
      }
    break;

  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert-sched-nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */
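/* For example, under scheme (1), if only the branch slot of the current
   group is still vacant, a non-branch NEXT_INSN needs no nops (it could
   not occupy the branch slot anyway), while a branch NEXT_INSN gets one
   nop, so that the nop closes the old group and the branch lands in the
   new one.  */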
static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx_insn *next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);
  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more'
         nops; in this case the last nop will start a new group and the
         branch will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop?  */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
          || rs6000_cpu_attr == CPU_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }
  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                group_insns[i] = 0;
            }
          n_nops--;
        }
      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate
               && insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing
   the estimated processor grouping on the compiler (as opposed to the
   function 'pad_groups' which tries to force the scheduler's grouping on
   the processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops'
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows: It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following occurs:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue
     slots) can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always
     needs to start a new group.  */
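/* To illustrate the boundary rules above: with an issue_rate of 4, once
   two insns have been placed in the current group only two slots remain,
   one of them the branch slot, so a cracked insn (which needs two
   ordinary issue slots) is estimated to start a new group.  */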
static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
                 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  group_end = false;
  insn = get_next_active_insn (prev_head_insn, tail);

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;
      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate
               && insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    }

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
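/* Note that pad_groups is the mirror image of redefine_groups above:
   rather than adopting the processor's estimated grouping, it fills every
   vacant issue slot at a scheduler-marked boundary (a TImode insn) with
   nops, so the hardware dispatcher has no choice but to honor that
   boundary.  */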
static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
            rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  bool group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn
             will force group termination, pad the group with nops to force
             group termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
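/* The context hooks below let the selective scheduler snapshot and restore
   the five globals mirrored in rs6000_sched_context, so that speculative
   scheduling attempts can be rolled back without corrupting the state of
   the current pass.  */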
/* Allocate store for new scheduling context.  */

static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */

static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}
/* Sets the global scheduling context to the one pointed to by _SC.  */

static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}
/* Free _SC.  */

static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}

static bool
rs6000_sched_can_speculate_insn (rtx_insn *insn)
{
  switch (get_attr_type (insn))
    {
    case TYPE_DIV:
    case TYPE_SDIV:
    case TYPE_DDIV:
    case TYPE_VECDIV:
    case TYPE_SSQRT:
    case TYPE_DSQRT:
      return false;

    default:
      return true;
    }
}
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("you cannot take the address of a nested function if you use "
                 "the %qs option", "-mno-pointers-to-nested-functions");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

        /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}

/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

     __attribute__((altivec(vector__)))
     __attribute__((altivec(pixel__)))   (always followed by 'unsigned short')
     __attribute__((altivec(bool__)))    (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
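/* For example, a declaration written 'vector unsigned short v;' reaches
   this handler as an 'unsigned short' decorated with altivec(vector__),
   and the switch below maps its E_HImode element type to
   unsigned_V8HI_type_node.  */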
static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without %qs", "-mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without %qs",
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without %qs",
               "-mvsx");
    }
  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case E_TImode:
          result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
          break;
        case E_DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case E_SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case E_HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case E_QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case E_SFmode: result = V4SF_type_node; break;
        case E_DFmode: result = V2DF_type_node; break;
        /* If the user says 'vector int bool', we may be handed the 'bool'
           attribute _before_ the 'vector' attribute, and so select the
           proper type in the 'b' case below.  */
        case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
        case E_V2DImode: case E_V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
        case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
        case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
        case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case E_V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
      break;
    default: break;
    }
  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";
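  /* These follow the Itanium C++ ABI's vendor-extended type syntax: for
     example, a parameter of type __pixel mangles as 'u7__pixel', so
     'void f(__pixel)' becomes '_Z1fu7__pixel'.  */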
  /* Use a unique name for __float128 rather than trying to use "e" or "g".
     Use "g" for IBM extended double, no matter whether it is long double
     (using -mabi=ibmlongdouble) or the distinct __ibm128 type.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (type == ieee128_float_type_node)
        return "U10__float128";

      if (TARGET_LONG_DOUBLE_128)
        {
          if (type == long_double_type_node)
            return (TARGET_IEEEQUAD) ? "U10__float128" : "g";

          if (type == ibm128_float_type_node)
            return "g";
        }
    }

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT
          && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && TARGET_MINIMAL_TOC)
    {
      if (!toc_initialized)
        {
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
      fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          toc_initialized = 1;
        }
    }
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */
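/* For example, when DEFAULT_ABI is ABI_AIX on a non-AIX (e.g. GNU/Linux)
   target, a function 'foo' has its SYMBOL_REF renamed here to '.foo', the
   entry-point name, so no descriptor lookup is needed later.  */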
static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}

static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);

      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */
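/* For example, given ADDR of the form (plus (reg 9) (const_int 8)),
   this returns (reg 9); incrementing r9 then effectively increments
   the whole address.  */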
rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}

/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
static void
macho_branch_islands (void)
{
  while (!vec_safe_is_empty (branch_islands))
    {
      branch_island *bi = &branch_islands->last ();
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      char tmp_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
        strcpy (name_buf, name + 1);
      else
        {
          name_buf[0] = '_';
          strcpy (name_buf + 1, name);
        }
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
              strcat (tmp_buf, name);
              strcat (tmp_buf, "\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }
          else
            {
              strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }

          strcat (tmp_buf, "\taddis r11,r11,ha16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtlr r0\n");

          strcat (tmp_buf, "\taddi r12,r11,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
        }
      else
        {
          strcat (tmp_buf, ":\nlis r12,hi16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
        }
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      branch_islands->pop ();
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
        {
          char name[32];
          get_ppc476_thunk_name (name);
          fprintf (file, "\tbl %s\n", name);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      else
        {
          fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
           (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
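/* SMALL_INT accepts exactly the values that fit in a signed 16-bit
   immediate, i.e. -0x8000 through 0x7fff, the range of the D field of a
   PowerPC addi instruction.  */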
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
                                        rtx reg)
{
  rtx base, offset;

  if (reg == NULL && !reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
         it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
                                                     Pmode, reg_temp);
      offset =
        rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                                Pmode, reg_temp);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (Pmode, base, INTVAL (offset));
          else if (!reload_completed)
            offset = force_reg (Pmode, offset);
          else
            {
              rtx mem = force_const_mem (Pmode, orig);
              return machopic_legitimize_pic_address (mem, Pmode, reg);
            }
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    HOST_WIDE_INT if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
         && strcmp (mapping[i].arg, cpu_id) != 0
         && (mapping[i].if_set & rs6000_isa_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1))
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}

static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1))
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
        {
          fputs ("\t.size\t", file);
          assemble_name (file, name);
          fputs (",24\n\t.type\t.", file);
          assemble_name (file, name);
          fputs (",@function\n", file);
          if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
            {
              fputs ("\t.globl\t.", file);
              assemble_name (file, name);
              putc ('\n', file);
            }
        }
      else
        ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }
  int uses_toc;
  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1)
      && !TARGET_SECURE_PLT
      && (!constant_pool_empty_p () || crtl->profile)
      && (uses_toc = uses_TOC ()))
    {
      char buf[256];

      if (uses_toc == 2)
        switch_to_other_text_partition ();
      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      fprintf (file, "\t.long ");
      assemble_name (file, toc_label_name);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
      if (uses_toc == 2)
        switch_to_other_text_partition ();
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      fprintf (file, "\t.quad .TOC.-");
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }
  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
        desc_name++;

      if (TREE_PUBLIC (decl))
        fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  /* ??? The value emitted depends on options active at file end.
     Assume anyone using #pragma or attributes that might change
     options knows what they are doing.  */
  if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && rs6000_passes_float)
    {
      int fp;

      if (TARGET_DF_FPR)
        fp = 1;
      else if (TARGET_SF_FPR)
        fp = 3;
      else
        fp = 2;
      if (rs6000_passes_long_double)
        {
          if (!TARGET_LONG_DOUBLE_128)
            fp |= 2 * 4;
          else if (TARGET_IEEEQUAD)
            fp |= 3 * 4;
          else
            fp |= 1 * 4;
        }
      fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
    }
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_vector)
        fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
                 (TARGET_ALTIVEC_ABI ? 2 : 1));
      if (rs6000_returns_struct)
        fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
                 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
    file_end_indicate_exec_stack ();
#endif

  if (flag_split_stack)
    file_end_indicate_split_stack ();

  if (cpu_builtin_p)
    {
      /* We have expanded a CPU builtin, so we need to emit a reference to
         the special symbol that LIBC uses to declare it supports the
         AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature.  */
      switch_to_section (data_section);
      fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
      fprintf (asm_out_file, "\t%s %s\n",
               TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
    }
}
#if TARGET_XCOFF

#ifndef HAVE_XCOFF_DWARF_EXTRAS
#define HAVE_XCOFF_DWARF_EXTRAS 0
#endif

static enum unwind_info_type
rs6000_xcoff_debug_unwind_info (void)
{
  return UI_NONE;
}
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
           SYMBOL_REF_BLOCK_OFFSET (symbol));
  fprintf (asm_out_file, "%s", SET_ASM_OP);
  RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
  fprintf (asm_out_file, ",");
  RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
  fprintf (asm_out_file, "\n");
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };

  if (flags & SECTION_EXCLUDE)
    smclass = 4;
  else if (flags & SECTION_DEBUG)
    {
      fprintf (asm_out_file, "\t.dwsect %s\n", name);
      return;
    }
  else if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}
#define IN_NAMED_SECTION(DECL) \
  ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
   && DECL_SECTION_NAME (DECL) != NULL)

static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align)
{
  /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
     named section.  */
  if (align > BIGGEST_ALIGNMENT)
    {
      resolve_unique_section (decl, reloc, true);
      if (IN_NAMED_SECTION (decl))
        return get_named_section (decl, NULL, reloc);
    }

  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
#if HAVE_AS_TLS
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
        {
          if (TREE_PUBLIC (decl))
            return tls_data_section;
          else if (bss_initializer_p (decl))
            {
              /* Convert to COMMON to emit in BSS.  */
              DECL_COMMON (decl) = 1;
              return tls_comm_section;
            }
          else
            return tls_private_data_section;
        }
      else
#endif
        if (TREE_PUBLIC (decl))
          return data_section;
        else
          return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private data and uninitialized data with
     alignment <= BIGGEST_ALIGNMENT.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || (DECL_INITIAL (decl) == NULL_TREE
          && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  set_decl_section_name (decl, name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
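/* For example (illustrative): "foo[DS]" is returned as "foo".  The
   length arithmetic relies on XCOFF mapping classes being exactly two
   characters wide, so a trailing ']' always marks a four-character
   "[XX]" suffix to drop.  */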
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
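/* Illustrative arithmetic for the flags above: the byte alignment is
   encoded as its log2 in the SECTION_ENTSIZE bits, so an 8-byte
   alignment is stored as exact_log2 (8) = 3, which
   rs6000_xcoff_asm_named_section later prints back as the .csect
   alignment operand.  */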
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
			   main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
			   main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (toc_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
struct declare_alias_data
{
  FILE *file;
  bool function_descriptor;
};
/* Declare alias N.  A helper function for for_node_and_aliases.  */

static bool
rs6000_declare_alias (struct symtab_node *n, void *d)
{
  struct declare_alias_data *data = (struct declare_alias_data *)d;
  /* Main symbol is output specially, because varasm machinery does part of
     the job for us - we do not need to declare .globl/lglobs and such.  */
  if (!n->alias || n->weakref)
    return false;

  if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
    return false;

  /* Prevent assemble_alias from trying to use .set pseudo operation
     that does not behave as expected by the middle-end.  */
  TREE_ASM_WRITTEN (n->decl) = true;

  const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
  char *buffer = (char *) alloca (strlen (name) + 2);
  char *p;
  int dollar_inside = 0;

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (n->decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (n->decl))
	{
	  if (dollar_inside)
	    {
	      if (data->function_descriptor)
		fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
	  if (data->function_descriptor)
	    {
	      fputs ("\t.globl .", data->file);
	      RS6000_OUTPUT_BASENAME (data->file, buffer);
	      putc ('\n', data->file);
	    }
	  fputs ("\t.globl ", data->file);
	  RS6000_OUTPUT_BASENAME (data->file, buffer);
	  putc ('\n', data->file);
	}
#ifdef ASM_WEAKEN_DECL
      else if (DECL_WEAK (n->decl) && !data->function_descriptor)
	ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
#endif
    }
  else
    {
      if (dollar_inside)
	{
	  if (data->function_descriptor)
	    fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
	}
      if (data->function_descriptor)
	{
	  fputs ("\t.lglobl .", data->file);
	  RS6000_OUTPUT_BASENAME (data->file, buffer);
	  putc ('\n', data->file);
	}
      fputs ("\t.lglobl ", data->file);
      RS6000_OUTPUT_BASENAME (data->file, buffer);
      putc ('\n', data->file);
    }
  if (data->function_descriptor)
    fputs (".", data->file);
  RS6000_OUTPUT_BASENAME (data->file, buffer);
  fputs (":\n", data->file);
  return false;
}
#ifdef HAVE_GAS_HIDDEN
/* Helper function to calculate visibility of a DECL
   and return the value as a const string.  */

static const char *
rs6000_xcoff_visibility (tree decl)
{
  static const char * const visibility_types[] = {
    "", ",protected", ",hidden", ",internal"
  };

  enum symbol_visibility vis = DECL_VISIBILITY (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && cgraph_node::get (decl)
      && cgraph_node::get (decl)->instrumentation_clone
      && cgraph_node::get (decl)->instrumented_version)
    vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);

  return visibility_types[vis];
}
#endif
/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
{
  char *buffer = (char *) alloca (strlen (name) + 1);
  char *p;
  int dollar_inside = 0;
  struct declare_alias_data data = {file, false};

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (decl))
	{
	  if (dollar_inside)
	    {
	      fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
	  fputs ("\t.globl .", file);
	  RS6000_OUTPUT_BASENAME (file, buffer);
#ifdef HAVE_GAS_HIDDEN
	  fputs (rs6000_xcoff_visibility (decl), file);
#endif
	  putc ('\n', file);
	}
    }
  else
    {
      if (dollar_inside)
	{
	  fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
	}
      fputs ("\t.lglobl .", file);
      RS6000_OUTPUT_BASENAME (file, buffer);
      putc ('\n', file);
    }
  fputs ("\t.csect ", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							&data, true);
  fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (", TOC[tc0], 0\n", file);
  in_section = NULL;
  switch_to_section (function_section (decl));
  putc ('.', file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  data.function_descriptor = true;
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							&data, true);
  if (!DECL_IGNORED_P (decl))
    {
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	xcoffout_declare_function (file, decl, buffer);
      else if (write_symbols == DWARF2_DEBUG)
	{
	  name = (*targetm.strip_name_encoding) (name);
	  fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
	}
    }
  return;
}
/* Output assembly language to globalize a symbol from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
#ifdef HAVE_GAS_HIDDEN
  fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* Output assembly language to define a symbol as COMMON from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
					     tree decl ATTRIBUTE_UNUSED,
					     const char *name,
					     unsigned HOST_WIDE_INT size,
					     unsigned HOST_WIDE_INT align)
{
  unsigned HOST_WIDE_INT align2 = 2;

  if (align > 32)
    align2 = floor_log2 (align / BITS_PER_UNIT);
  else if (size > 4)
    align2 = 3;

  fputs (COMMON_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);

  fprintf (stream,
	   "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
	   size, align2);

#ifdef HAVE_GAS_HIDDEN
  if (decl != NULL)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* This macro produces the initial definition of an object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
{
  struct declare_alias_data data = {file, false};
  RS6000_OUTPUT_BASENAME (file, name);
  fputs (":\n", file);
  symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							       &data, true);
}
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */

void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}
/* Output a symbol offset relative to the dbase for the current object.
   We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
   signed offsets.

   __gcc_unwind_dbase is embedded in all executables/libraries through
   libgcc/config/rs6000/crtdbase.S.  */

void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;
  const char *symname;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;

  /* Append mapping class to extern decls.  */
  symname = XSTR (symbol, 0);
  if (decl /* sync condition with assemble_external () */
      && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
      && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && symname[strlen (symname) - 1] != ']')
    {
      char *newname = (char *) alloca (strlen (symname) + 5);
      strcpy (newname, symname);
      strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
			? "[DS]" : "[UA]"));
      XSTR (symbol, 0) = ggc_strdup (newname);
    }
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */
void
rs6000_asm_weaken_decl (FILE *stream, tree decl,
			const char *name, const char *val)
{
  fputs ("\t.weak\t", stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  if (decl && TREE_CODE (decl) == FUNCTION_DECL
      && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
    {
      if (TARGET_XCOFF)
	fputs ("[DS]", stream);
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
      if (decl)
	fputs (rs6000_xcoff_visibility (decl), stream);
#endif
      fputs ("\n\t.weak\t.", stream);
      RS6000_OUTPUT_BASENAME (stream, name);
    }
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
  if (decl)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  fputc ('\n', stream);
  if (val)
    {
#ifdef ASM_OUTPUT_DEF
      ASM_OUTPUT_DEF (stream, name, val);
#endif
      if (decl && TREE_CODE (decl) == FUNCTION_DECL
	  && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
	{
	  fputs ("\t.set\t.", stream);
	  RS6000_OUTPUT_BASENAME (stream, name);
	  fputs (",.", stream);
	  RS6000_OUTPUT_BASENAME (stream, val);
	  fputc ('\n', stream);
	}
    }
}
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
		  int opno ATTRIBUTE_UNUSED, int *total, bool speed)
{
  int code = GET_CODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
    case CONST:
    case HIGH:
    case SYMBOL_REF:
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
	*total += COSTS_N_INSNS (100);
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD unless we have a mod instruction.  */
      if (!TARGET_MODULO && (code == MOD || code == UMOD))
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
      *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
      return false;

    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case AND:
      if (CONST_INT_P (XEXP (x, 1)))
	{
	  rtx left = XEXP (x, 0);
	  rtx_code left_code = GET_CODE (left);

	  /* rotate-and-mask: 1 insn.  */
	  if ((left_code == ROTATE
	       || left_code == ASHIFT
	       || left_code == LSHIFTRT)
	      && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
	    {
	      *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
	      if (!CONST_INT_P (XEXP (left, 1)))
		*total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* rotate-and-mask (no rotate), andi., andis.: 1 insn.  */
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
	  if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
	      || (val & 0xffff) == val
	      || (val & 0xffff0000) == val
	      || ((val & 0xffff) == 0 && mode == SImode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* 2 insns.  */
	  if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (2);
	      return true;
	    }
	}

      *total = COSTS_N_INSNS (1);
      return false;

    case IOR:
      /* FIXME */
      *total = COSTS_N_INSNS (1);
      return true;

    case CLZ:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
      /* The EXTSWSLI instruction is a combined instruction.  Don't count both
	 the sign extend and shift separately within the insn.  */
      if (TARGET_EXTSWSLI && mode == DImode
	  && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
	{
	  *total = 0;
	  return false;
	}
      /* fall through */

    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = rs6000_cost->sfdf_convert;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case NE:
    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      if (TARGET_ISEL && !TARGET_MFCRF)
		*total = COSTS_N_INSNS (8);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  if (TARGET_ISEL && !TARGET_MFCRF)
	    *total = COSTS_N_INSNS (8);
	  else
	    *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
			int opno, int *total, bool speed)
{
  bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_MODE_NAME (mode),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
static int
rs6000_insn_cost (rtx_insn *insn, bool speed)
{
  if (recog_memoized (insn) < 0)
    return 0;

  if (!speed)
    return get_attr_length (insn);

  int cost = get_attr_cost (insn);
  if (cost > 0)
    return cost;

  int n = get_attr_length (insn) / 4;
  enum attr_type type = get_attr_type (insn);

  switch (type)
    {
    case TYPE_LOAD:
    case TYPE_FPLOAD:
    case TYPE_VECLOAD:
      cost = COSTS_N_INSNS (n + 1);
      break;

    case TYPE_MUL:
      switch (get_attr_size (insn))
	{
	case SIZE_8:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
	  break;
	case SIZE_16:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
	  break;
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case TYPE_DIV:
      switch (get_attr_size (insn))
	{
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case TYPE_FP:
      cost = n * rs6000_cost->fp;
      break;
    case TYPE_DMUL:
      cost = n * rs6000_cost->dmul;
      break;
    case TYPE_SDIV:
      cost = n * rs6000_cost->sdiv;
      break;
    case TYPE_DDIV:
      cost = n * rs6000_cost->ddiv;
      break;

    case TYPE_SYNC:
    case TYPE_LOAD_L:
    case TYPE_MFCR:
    case TYPE_MFCRF:
      cost = COSTS_N_INSNS (n + 2);
      break;

    default:
      cost = COSTS_N_INSNS (n);
    }

  return cost;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, machine_mode mode,
			   addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
		|| rs6000_cpu == PROCESSOR_POWER7
		|| rs6000_cpu == PROCESSOR_POWER8
		|| rs6000_cpu == PROCESSOR_POWER9)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs (0, mode);

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs (0, mode);
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs (0, mode);
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
	    || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs (32, mode);
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements the
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (tree fndecl)
{
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case VSX_BUILTIN_XVSQRTDP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

    case VSX_BUILTIN_XVSQRTSP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

    default:
      return NULL_TREE;
    }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = const_double_from_real_value (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = const_double_from_real_value (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = const_double_from_real_value (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
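/* A note on the algebra above (illustrative, not from the original
   source): the emitted RTL computes
   dst = -fma (m1, m2, -a) = -(m1*m2 - a) = a - m1*m2, which matches the
   fnmsub instruction.  The fnma optab instead models fma (-m1, m2, a);
   the two agree except for the sign of a zero result, hence the direct
   expansion.  */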
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					      UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1) {

    /* e0 = 1. - d * x0  */
    e0 = gen_reg_rtx (mode);
    rs6000_emit_nmsub (e0, d, x0, one);

    /* x1 = x0 + e0 * x0  */
    x1 = gen_reg_rtx (mode);
    rs6000_emit_madd (x1, e0, x0, x0);

    for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
	 ++i, xprev = xnext, eprev = enext) {

      /* enext = eprev * eprev  */
      enext = gen_reg_rtx (mode);
      emit_insn (gen_mul (enext, eprev, eprev));

      /* xnext = xprev + enext * xprev  */
      xnext = gen_reg_rtx (mode);
      rs6000_emit_madd (xnext, enext, xprev, xprev);
    }

  } else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
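/* Illustrative accuracy arithmetic for the pass counts above: a
   low-precision reciprocal estimate starts near 5 good bits and each
   Newton-Raphson pass roughly doubles that, so 3 passes give
   5 -> 10 -> 20 -> 40 bits, covering SFmode's 23-bit significand, and
   the extra DFmode/V2DFmode pass reaches ~80 bits for the 52-bit
   significand.  With TARGET_RECIP_PRECISION the estimate starts near
   14 bits, so one pass (SFmode) or two (DFmode) suffice.  */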
/* Goldschmidt's Algorithm for single/double-precision floating point
   sqrt and rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
{
  machine_mode mode = GET_MODE (src);
  rtx e = gen_reg_rtx (mode);
  rtx g = gen_reg_rtx (mode);
  rtx h = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  int i;
  rtx mhalf;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);

  /* e = rsqrt estimate */
  emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					     UNSPEC_RSQRT)));

  /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0).  */
  if (!recip)
    {
      rtx zero = force_reg (mode, CONST0_RTX (mode));

      if (mode == SFmode)
	{
	  rtx target = emit_conditional_move (e, GT, src, zero, mode,
					      e, zero, mode, 0);
	  if (target != e)
	    emit_move_insn (e, target);
	}
      else
	{
	  rtx cond = gen_rtx_GT (VOIDmode, e, zero);
	  rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
	}
    }

  /* g = sqrt estimate.  */
  emit_insn (gen_mul (g, e, src));
  /* h = 1/(2*sqrt) estimate.  */
  emit_insn (gen_mul (h, e, mhalf));

  if (recip)
    {
      if (passes == 1)
	{
	  rtx t = gen_reg_rtx (mode);
	  rs6000_emit_nmsub (t, g, h, mhalf);
	  /* Apply correction directly to 1/rsqrt estimate.  */
	  rs6000_emit_madd (dst, e, t, e);
	}
      else
	{
	  for (i = 0; i < passes; i++)
	    {
	      rtx t1 = gen_reg_rtx (mode);
	      rtx g1 = gen_reg_rtx (mode);
	      rtx h1 = gen_reg_rtx (mode);

	      rs6000_emit_nmsub (t1, g, h, mhalf);
	      rs6000_emit_madd (g1, g, t1, g);
	      rs6000_emit_madd (h1, h, t1, h);

	      g = g1;
	      h = h1;
	    }
	  /* Multiply by 2 for 1/rsqrt.  */
	  emit_insn (gen_add3_insn (dst, h, h));
	}
    }
  else
    {
      rtx t = gen_reg_rtx (mode);
      rs6000_emit_nmsub (t, g, h, mhalf);
      rs6000_emit_madd (dst, g, t, g);
    }

  return;
}
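/* Illustrative recurrence for the loop above: with g ~= sqrt(src) and
   h ~= 1/(2*sqrt(src)), each pass computes the residual
   t = 1/2 - g*h (via rs6000_emit_nmsub) and then refines
   g' = g + g*t and h' = h + h*t.  At the fixed point g*h = 1/2
   exactly, so g = sqrt(src) and 2*h = 1/sqrt(src), which is why the
   rsqrt path finishes with dst = h + h.  */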
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
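/* A worked example of the popcntb fallback (illustrative): popcntb
   counts bits per byte, so for SRC = 0x0f00ff01 it yields
   TMP1 = 0x04000801.  Multiplying by 0x01010101 sums all the byte
   counts into the most significant byte (4 + 0 + 8 + 1 = 13 = 0x0d),
   and the shift right by 24 moves that total into the low byte of DST.
   The 64-bit path is identical with a 0x0101010101010101 multiplier
   and a shift of 56.  */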
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
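/* Illustrative: parity is the low bit of the population count, so the
   fallback ladders above fold the popcntb byte counts with
   shift-and-xor steps.  XOR preserves parity, so after folding the
   16-bit and then 8-bit halves (32-bit case) the low byte's bit 0
   holds the parity of the whole word, which the final AND with 1
   extracts.  */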
/* Expand an Altivec constant permutation for little endian mode.
   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode:  it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9  = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and issue

     vperm 9,11,10,12

   we get the desired

     vr9  = 00000006 00000004 00000002 00000000.  */

void
altivec_expand_vec_perm_const_le (rtx operands[4])
{
  unsigned int i;
  rtx perm[16];
  rtx constv, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      unsigned int elt = 31 - (INTVAL (e) & 31);
      perm[i] = GEN_INT (elt);
    }

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
			   UNSPEC_VPERM);
  if (!REG_P (target))
    {
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector NAND or NOR to do the adjustment.  */

void
altivec_expand_vec_perm_le (rtx operands[4])
{
  rtx notx, iorx, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];
  rtx tmp = target;
  rtx norreg = gen_reg_rtx (V16QImode);
  machine_mode mode = GET_MODE (target);

  /* Get everything in regs so the pattern matches.  */
  if (!REG_P (op0))
    op0 = force_reg (mode, op0);
  if (!REG_P (op1))
    op1 = force_reg (mode, op1);
  if (!REG_P (sel))
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (mode);

  if (TARGET_P9_VECTOR)
    {
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
			       UNSPEC_VPERMR);
    }
  else
    {
      /* Invert the selector with a VNAND if available, else a VNOR.
	 The VNAND is preferred for future fusion opportunities.  */
      notx = gen_rtx_NOT (V16QImode, sel);
      iorx = (TARGET_P8_VECTOR
	      ? gen_rtx_IOR (V16QImode, notx, notx)
	      : gen_rtx_AND (V16QImode, notx, notx));
      emit_insn (gen_rtx_SET (norreg, iorx));

      /* Permute with operands reversed and adjusted selector.  */
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
			       UNSPEC_VPERM);
    }

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
    {
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
      {  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
      {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
       : CODE_FOR_altivec_vmrglb_direct),
      {  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
       : CODE_FOR_altivec_vmrglh_direct),
      {  0,  1, 16, 17,  2,  3, 18, 19,  4,  5, 20, 21,  6,  7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
       : CODE_FOR_altivec_vmrglw_direct),
      {  0,  1,  2,  3, 16, 17, 18, 19,  4,  5,  6,  7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
       : CODE_FOR_altivec_vmrghb_direct),
      {  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
       : CODE_FOR_altivec_vmrghh_direct),
      {  8,  9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
       : CODE_FOR_altivec_vmrghw_direct),
      {  8,  9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
       : CODE_FOR_p8_vmrgow_v4sf_direct),
      {  0,  1,  2,  3, 16, 17, 18, 19,  8,  9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
       : CODE_FOR_p8_vmrgew_v4sf_direct),
      {  4,  5,  6,  7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
	break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
	perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
	if (perm[i] != elt)
	  break;
      if (i == 16)
	{
	  if (!BYTES_BIG_ENDIAN)
	    elt = 15 - elt;
	  emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
	  return true;
	}

      if (elt % 2 == 0)
	{
	  for (i = 0; i < 16; i += 2)
	    if (perm[i] != elt || perm[i + 1] != elt + 1)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
	      x = gen_reg_rtx (V8HImode);
	      emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}

      if (elt % 4 == 0)
	{
	  for (i = 0; i < 16; i += 4)
	    if (perm[i] != elt
		|| perm[i + 1] != elt + 1
		|| perm[i + 2] != elt + 2
		|| perm[i + 3] != elt + 3)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
	      x = gen_reg_rtx (V4SImode);
	      emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      if ((patterns[j].mask & rs6000_isa_flags) == 0)
	continue;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
	swapped = false;
      else if (perm[0] == elt + 16)
	swapped = true;
      else
	continue;
      for (i = 1; i < 16; ++i)
	{
	  elt = patterns[j].perm[i];
	  if (swapped)
	    elt = (elt >= 16 ? elt - 16 : elt + 16);
	  else if (one_vec && elt >= 16)
	    elt -= 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  enum insn_code icode = patterns[j].impl;
	  machine_mode omode = insn_data[icode].operand[0].mode;
	  machine_mode imode = insn_data[icode].operand[1].mode;

	  /* For little-endian, don't use vpkuwum and vpkuhum if the
	     underlying vector type is not V4SI and V8HI, respectively.
	     For example, using vpkuwum with a V8HI picks up the even
	     halfwords (BE numbering) when the even halfwords (LE
	     numbering) are what we need.  */
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuwum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V4SImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
	    continue;
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuhum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V8HImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V8HImode)))
	    continue;

	  /* For little-endian, the two input operands must be swapped
	     (or swapped back) to ensure proper right-to-left numbering
	     in place of left-to-right numbering.  */
	  if (swapped ^ !BYTES_BIG_ENDIAN)
	    std::swap (op0, op1);
	  if (imode != V16QImode)
	    {
	      op0 = gen_lowpart (imode, op0);
	      op1 = gen_lowpart (imode, op1);
	    }
	  if (omode == V16QImode)
	    x = target;
	  else
	    x = gen_reg_rtx (omode);
	  emit_insn (GEN_FCN (icode) (x, op0, op1));
	  if (omode != V16QImode)
	    emit_move_insn (target, gen_lowpart (V16QImode, x));
	  return true;
	}
    }

  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_const_le (operands);
      return true;
    }

  return false;
}
/* Expand a Paired Single or VSX Permute Doubleword constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
				unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
	op0 = op1;
      else
	op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
	return false;
      perm0 -= 2;
      perm1 += 2;
      std::swap (op0, op1);
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success! */
  if (target != NULL)
    {
      machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (target, x));
    }
  return true;
}
bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}
/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (machine_mode vmode, vec_perm_indices sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
			   machine_mode vmode, unsigned nelt, rtx perm[])
{
  machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    imode = mode_for_int_vector (vmode).require ();

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
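/* For example (illustrative), with a V4SImode target the selector
   built above is {0, 2, 4, 6}: the even elements of the 8-element
   concatenation of OP0 and OP1.  */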
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
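/* For example (illustrative), with V4SImode and HIGHP true the
   selector built above is {0, 4, 1, 5} (BE element numbering), pairing
   the leading halves of OP0 and OP1; with HIGHP false it is
   {2, 6, 3, 7}.  */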
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */

void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
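/* For example (illustrative), SCALE = 3 computes 2^3 = 8.0, splats
   {8.0, 8.0} into SCALE_VEC, and the vector multiply scales each
   double lane of SRC by 8.  */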
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);

  if (TARGET_FLOAT128_TYPE
      && (mode == KCmode
	  || (mode == TCmode && TARGET_IEEEQUAD)))
    regno = ALTIVEC_ARG_RETURN;

  else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;

  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Return an rtx describing a return value of MODE as a PARALLEL
   in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
   stride REG_STRIDE.  */

static rtx
rs6000_parallel_return (machine_mode mode,
			int n_elts, machine_mode elt_mode,
			unsigned int regno, unsigned int reg_stride)
{
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));

  int i;
  for (i = 0; i < n_elts; i++)
    {
      rtx r = gen_rtx_REG (elt_mode, regno);
      rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
      XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
      regno += reg_stride;
    }

  return par;
}
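/* For example (illustrative), rs6000_parallel_return (DImode, 2,
   SImode, GP_ARG_RETURN, 1) describes a 64-bit value returned as two
   SImode pieces in r3 and r4 at byte offsets 0 and 4, as used for
   -m32 -mpowerpc64 below.  */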
/* Target hook for TARGET_FUNCTION_VALUE.

   An integer value is in r3 and a floating-point value is in fp1,
   unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
		       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		       bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  unsigned int regno;
  machine_mode elt_mode;
  int n_elts;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  mode = TYPE_MODE (valtype);

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
    {
      int first_reg, n_regs;

      if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
	{
	  /* _Decimal128 must use even/odd register pairs.  */
	  first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
	  n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	}
      else
	{
	  first_reg = ALTIVEC_ARG_RETURN;
	  n_regs = 1;
	}

      return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
    }

  /* Some return value types need to be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64)
    switch (mode)
      {
      default:
	break;
      case E_DImode:
      case E_SCmode:
      case E_DCmode:
      case E_TCmode:
	int count = GET_MODE_SIZE (mode) / 4;
	return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
      }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
	   && !FLOAT128_VECTOR_P (mode)
	   && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
static rtx
rs6000_libcall_value (machine_mode mode)
{
  unsigned int regno;

  /* Long long return values need to be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
	   && TARGET_HARD_FLOAT
	   && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */
static int
rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
{
  int n;

  n = 0;
  pressure_classes[n++] = GENERAL_REGS;
  if (TARGET_VSX)
    pressure_classes[n++] = VSX_REGS;
  else
    {
      if (TARGET_ALTIVEC)
	pressure_classes[n++] = ALTIVEC_REGS;
      if (TARGET_HARD_FLOAT)
	pressure_classes[n++] = FLOAT_REGS;
    }
  pressure_classes[n++] = CR_REGS;
  pressure_classes[n++] = SPECIAL_REGS;

  return n;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : from == RS6000_PIC_OFFSET_TABLE_REGNUM
	  ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
	    || constant_pool_empty_p ()
	  : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
	offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
	     ? info->fixed_size + info->vars_size + info->parm_size
	     : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
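
/* Worked example (illustrative): when the prologue has pushed a frame
   (info->push_p), eliminating the argument pointer to the stack pointer
   yields info->total_size, i.e. the distance the prologue moved the stack
   pointer; eliminating it to the hard frame pointer always yields
   info->total_size, since the hard frame pointer tracks the incoming SP.  */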
/* Fill in sizes of registers used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
	 The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
	{
	  int column = DWARF_REG_TO_UNWIND_COLUMN
		(DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
	  HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

	  emit_move_insn (adjust_address (mem, mode, offset), value);
	}
    }
}
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
    return regno;

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
  if (regno <= 63)
    return regno;
  if (regno == LR_REGNO)
    return 65;
  if (regno == CTR_REGNO)
    return 66;
  /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
     translated any combination of CR2, CR3, CR4 saves to a save of CR2.
     The actual code emitted saves the whole of CR, so we map CR2_REGNO
     to the DWARF reg for CR.  */
  if (format == 1 && regno == CR2_REGNO)
    return 64;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
#endif
  return regno;
}
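
/* Example mappings implied by the code above (illustrative): with
   RS6000_USE_DWARF_NUMBERING, CR2 in .debug_info maps to 86 + 2 == 88,
   but in .debug_frame (FORMAT == 1) it maps to the DWARF number for the
   whole CR, because the prologue saves all of CR with a single store.  */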
/* target hook eh_return_filter_mode */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (scalar_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  /* There is no vector form for IEEE 128-bit.  If we return true for IEEE
     128-bit, the compiler might try to widen IEEE 128-bit to IBM
     double-double.  */
  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
    return true;

  else
    return false;
}
/* Target hook for floatn_mode.  */
static opt_scalar_float_mode
rs6000_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      switch (n)
	{
	case 32:
	  return DFmode;

	case 64:
	  if (TARGET_FLOAT128_TYPE)
	    return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
	  else
	    return opt_scalar_float_mode ();

	case 128:
	  return opt_scalar_float_mode ();

	default:
	  /* Those are the only valid _FloatNx types.  */
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (n)
	{
	case 32:
	  return SFmode;

	case 64:
	  return DFmode;

	case 128:
	  if (TARGET_FLOAT128_TYPE)
	    return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
	  else
	    return opt_scalar_float_mode ();

	default:
	  return opt_scalar_float_mode ();
	}
    }
}
/* Target hook for c_mode_for_suffix.  */
static machine_mode
rs6000_c_mode_for_suffix (char suffix)
{
  if (TARGET_FLOAT128_TYPE)
    {
      if (suffix == 'q' || suffix == 'Q')
	return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;

      /* At the moment, we are not defining a suffix for IBM extended double.
	 If/when the default for -mabi=ieeelongdouble is changed, and we want
	 to support __ibm128 constants in legacy library code, we may need to
	 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
	 'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
	 __float80 constants.  */
    }

  return VOIDmode;
}
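
/* Example (illustrative): with -mfloat128, a constant such as

     __float128 x = 1.0q;	// 'q'/'Q' suffix handled above

   gets KFmode, or TFmode when long double is already IEEE 128-bit.  */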
/* Target hook for invalid_arg_for_unprototyped_fn. */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl,
				 const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	 ? N_("AltiVec argument passed to unprototyped function")
	 : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

#if TARGET_ELF
static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
#endif
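
/* Illustrative sketch of how this offset is used (the mapping itself lives
   in the sanitizer runtime, not here): with the usual shadow scale of 3,
   shadow = (addr >> 3) + rs6000_asan_shadow_offset (), i.e.
   (addr >> 3) + (1ULL << 41) for 64-bit code and (addr >> 3) + (1 << 29)
   for 32-bit code.  */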
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  HOST_WIDE_INT mask;		/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",			OPTION_MASK_ALTIVEC,		false, true  },
  { "cmpb",			OPTION_MASK_CMPB,		false, true  },
  { "crypto",			OPTION_MASK_CRYPTO,		false, true  },
  { "direct-move",		OPTION_MASK_DIRECT_MOVE,	false, true  },
  { "dlmzb",			OPTION_MASK_DLMZB,		false, true  },
  { "efficient-unaligned-vsx",	OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
								false, true  },
  { "float128",			OPTION_MASK_FLOAT128_KEYWORD,	false, true  },
  { "float128-hardware",	OPTION_MASK_FLOAT128_HW,	false, true  },
  { "fprnd",			OPTION_MASK_FPRND,		false, true  },
  { "hard-dfp",			OPTION_MASK_DFP,		false, true  },
  { "htm",			OPTION_MASK_HTM,		false, true  },
  { "isel",			OPTION_MASK_ISEL,		false, true  },
  { "mfcrf",			OPTION_MASK_MFCRF,		false, true  },
  { "mfpgpr",			OPTION_MASK_MFPGPR,		false, true  },
  { "modulo",			OPTION_MASK_MODULO,		false, true  },
  { "mulhw",			OPTION_MASK_MULHW,		false, true  },
  { "multiple",			OPTION_MASK_MULTIPLE,		false, true  },
  { "popcntb",			OPTION_MASK_POPCNTB,		false, true  },
  { "popcntd",			OPTION_MASK_POPCNTD,		false, true  },
  { "power8-fusion",		OPTION_MASK_P8_FUSION,		false, true  },
  { "power8-fusion-sign",	OPTION_MASK_P8_FUSION_SIGN,	false, true  },
  { "power8-vector",		OPTION_MASK_P8_VECTOR,		false, true  },
  { "power9-fusion",		OPTION_MASK_P9_FUSION,		false, true  },
  { "power9-minmax",		OPTION_MASK_P9_MINMAX,		false, true  },
  { "power9-misc",		OPTION_MASK_P9_MISC,		false, true  },
  { "power9-vector",		OPTION_MASK_P9_VECTOR,		false, true  },
  { "powerpc-gfxopt",		OPTION_MASK_PPC_GFXOPT,		false, true  },
  { "powerpc-gpopt",		OPTION_MASK_PPC_GPOPT,		false, true  },
  { "quad-memory",		OPTION_MASK_QUAD_MEMORY,	false, true  },
  { "quad-memory-atomic",	OPTION_MASK_QUAD_MEMORY_ATOMIC,	false, true  },
  { "recip-precision",		OPTION_MASK_RECIP_PRECISION,	false, true  },
  { "save-toc-indirect",	OPTION_MASK_SAVE_TOC_INDIRECT,	false, true  },
  { "string",			OPTION_MASK_STRING,		false, true  },
  { "toc-fusion",		OPTION_MASK_TOC_FUSION,		false, true  },
  { "update",			OPTION_MASK_NO_UPDATE,		true , true  },
  { "vsx",			OPTION_MASK_VSX,		false, true  },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",			OPTION_MASK_64BIT,		false, false },
  { "aix32",			OPTION_MASK_64BIT,		true,  false },
#else
  { "64",			OPTION_MASK_64BIT,		false, false },
  { "32",			OPTION_MASK_64BIT,		true,  false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi",			OPTION_MASK_EABI,		false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",			OPTION_MASK_LITTLE_ENDIAN,	false, false },
  { "big",			OPTION_MASK_LITTLE_ENDIAN,	true,  false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",		OPTION_MASK_RELOCATABLE,	false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",		OPTION_MASK_STRICT_ALIGN,	false, false },
#endif
  { "soft-float",		OPTION_MASK_SOFT_FLOAT,		false, false },
  { "string",			OPTION_MASK_STRING,		false, false },
};
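
/* Example usage of the masks above (illustrative): each entry lets user code
   toggle the corresponding -m<name> flag on a per-function basis, e.g.

     __attribute__((__target__("vsx,no-multiple")))
     void f (void);

   which sets OPTION_MASK_VSX and clears OPTION_MASK_MULTIPLE for f only.  */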
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",		 RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		 RS6000_BTM_VSX,	false, false },
  { "paired",		 RS6000_BTM_PAIRED,	false, false },
  { "fre",		 RS6000_BTM_FRE,	false, false },
  { "fres",		 RS6000_BTM_FRES,	false, false },
  { "frsqrte",		 RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		 RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		 RS6000_BTM_POPCNTD,	false, false },
  { "cell",		 RS6000_BTM_CELL,	false, false },
  { "power8-vector",	 RS6000_BTM_P8_VECTOR,	false, false },
  { "power9-vector",	 RS6000_BTM_P9_VECTOR,	false, false },
  { "power9-misc",	 RS6000_BTM_P9_MISC,	false, false },
  { "crypto",		 RS6000_BTM_CRYPTO,	false, false },
  { "htm",		 RS6000_BTM_HTM,	false, false },
  { "hard-dfp",		 RS6000_BTM_DFP,	false, false },
  { "hard-float",	 RS6000_BTM_HARD_FLOAT,	false, false },
  { "long-double-128",	 RS6000_BTM_LDBL128,	false, false },
  { "float128",		 RS6000_BTM_FLOAT128,	false, false },
  { "float128-hw",	 RS6000_BTM_FLOAT128_HW, false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
  { "optimize-swaps",
    offsetof (struct gcc_options, x_rs6000_optimize_swaps),
    offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
  { "allow-movmisalign",
    offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
    offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
  { "sched-groups",
    offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
    offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
  { "always-hint",
    offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
    offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
  { "align-branch-targets",
    offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
    offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
  { "tls-markers",
    offsetof (struct gcc_options, x_tls_markers),
    offsetof (struct cl_target_option, x_tls_markers), },
  { "sched-prolog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "sched-epilog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
};
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  bool error_p = false;
	  bool not_valid_p = false;
	  const char *cpu_opt = NULL;

	  p = NULL;
	  if (strncmp (q, "cpu=", 4) == 0)
	    {
	      int cpu_index = rs6000_cpu_name_lookup (q+4);
	      if (cpu_index >= 0)
		rs6000_cpu_index = cpu_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+4;
		}
	    }
	  else if (strncmp (q, "tune=", 5) == 0)
	    {
	      int tune_index = rs6000_cpu_name_lookup (q+5);
	      if (tune_index >= 0)
		rs6000_tune_index = tune_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+5;
		}
	    }
	  else
	    {
	      size_t i;
	      bool invert = false;
	      char *r = q;

	      error_p = true;
	      if (strncmp (r, "no-", 3) == 0)
		{
		  invert = true;
		  r += 3;
		}

	      for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
		if (strcmp (r, rs6000_opt_masks[i].name) == 0)
		  {
		    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

		    if (!rs6000_opt_masks[i].valid_target)
		      not_valid_p = true;
		    else
		      {
			error_p = false;
			rs6000_isa_flags_explicit |= mask;

			/* VSX needs altivec, so -mvsx automagically sets
			   altivec and disables -mavoid-indexed-addresses.  */
			if (!invert)
			  {
			    if (mask == OPTION_MASK_VSX)
			      {
				mask |= OPTION_MASK_ALTIVEC;
				TARGET_AVOID_XFORM = 0;
			      }
			  }

			if (rs6000_opt_masks[i].invert)
			  invert = !invert;

			if (invert)
			  rs6000_isa_flags &= ~mask;
			else
			  rs6000_isa_flags |= mask;
		      }
		    break;
		  }

	      if (error_p && !not_valid_p)
		{
		  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
		    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
		      {
			size_t j = rs6000_opt_vars[i].global_offset;
			*((int *) ((char *)&global_options + j)) = !invert;
			error_p = false;
			not_valid_p = false;
			break;
		      }
		}
	    }

	  if (error_p)
	    {
	      const char *eprefix, *esuffix;

	      ret = false;
	      if (attr_p)
		{
		  eprefix = "__attribute__((__target__(";
		  esuffix = ")))";
		}
	      else
		{
		  eprefix = "#pragma GCC target ";
		  esuffix = "";
		}

	      if (cpu_opt)
		error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
		       q, esuffix);
	      else if (not_valid_p)
		error ("%s%qs%s is not allowed", eprefix, q, esuffix);
	      else
		error ("%s%qs%s is invalid", eprefix, q, esuffix);
	    }
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      bool ret2 = rs6000_inner_target_options (value, attr_p);
	      if (!ret2)
		ret = false;
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  return ret;
}
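
/* Example (illustrative): given attribute((target("cpu=power9,no-htm"))),
   the STRING_CST case above tokenizes on ',', the "cpu=" prefix sets
   rs6000_cpu_index, and "no-htm" is looked up in rs6000_opt_masks with the
   "no-" prefix stripped and INVERT set, clearing OPTION_MASK_HTM.  */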
/* Print out the target options as a list for -mdebug=target.  */
static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  fprintf (stderr, "%s\"%s\"", prefix, q);
	  prefix = ", ";
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      rs6000_debug_target_options (value, prefix);
	      prefix = ", ";
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
			  tree ARG_UNUSED (name),
			  tree args,
			  int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize;
  tree new_target, new_optimize;
  tree func_optimize;

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
	fprintf (stderr, "function: %.*s\n",
		 (int) IDENTIFIER_LENGTH (tname),
		 IDENTIFIER_POINTER (tname));
      else
	fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
	fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node (&global_options);

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

static bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node (&global_options);
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
	{
	  fprintf (stderr, "pop_target:\n");
	  debug_tree (pop_target);
	}
      else
	fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
		  ? pop_target
		  : target_option_default_node);
      cl_target_option_restore (&global_options,
				TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
	  || !rs6000_option_override_internal (false)
	  || (cur_tree = build_target_option_node (&global_options))
	     == NULL_TREE)
	{
	  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
	    fprintf (stderr, "invalid pragma\n");

	  return false;
	}
    }

  target_option_current_node = cur_tree;
  rs6000_activate_target_options (target_option_current_node);

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_rs6000_isa_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
	{
	  /* Delete old macros.  */
	  rs6000_target_modify_macros_ptr (false,
					   prev_flags & diff_flags,
					   prev_bumask & diff_bumask);

	  /* Define new macros.  */
	  rs6000_target_modify_macros_ptr (true,
					   cur_flags & diff_flags,
					   cur_bumask & diff_bumask);
	}
    }

  return true;
}
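
/* Example (illustrative): a sequence like

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     ...				// __ALTIVEC__ now defined
     #pragma GCC pop_options		// ARGS == NULL, POP_TARGET restores

   exercises both paths above: the parse path defines the new macros, and the
   pop path restores the saved target node and deletes them again.  */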
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Restore target's globals from NEW_TREE and invalidate the
   rs6000_previous_fndecl cache.  */

void
rs6000_activate_target_options (tree new_tree)
{
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  rs6000_previous_fndecl = NULL_TREE;
}
/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
	fprintf (stderr, ", fndecl %s (%p)",
		 (DECL_NAME (fndecl)
		  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
		  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
	fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == rs6000_previous_fndecl)
    return;

  tree old_tree;
  if (rs6000_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
  else
    old_tree = target_option_default_node;

  tree new_tree;
  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
	new_tree = target_option_current_node;
      else
	new_tree = NULL_TREE;
    }
  else
    {
      new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
      if (new_tree == NULL_TREE)
	new_tree = target_option_default_node;
    }

  if (TARGET_DEBUG_TARGET)
    {
      if (new_tree)
	{
	  fprintf (stderr, "\nnew fndecl target specific options:\n");
	  debug_tree (new_tree);
	}

      if (old_tree)
	{
	  fprintf (stderr, "\nold fndecl target specific options:\n");
	  debug_tree (old_tree);
	}

      if (old_tree != NULL_TREE || new_tree != NULL_TREE)
	fprintf (stderr, "--------------------\n");
    }

  if (new_tree && old_tree != new_tree)
    rs6000_activate_target_options (new_tree);

  if (fndecl)
    rs6000_previous_fndecl = fndecl;
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
			       struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}
/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
				  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
			    ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
			    ptr->x_rs6000_isa_flags_explicit);
}
/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
			       int indent,
			       const char *string,
			       HOST_WIDE_INT flags,
			       const char *prefix,
			       const struct rs6000_opt_mask *opts,
			       size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 120;
  size_t prefix_len = strlen (prefix);
  size_t comma_len = 0;
  const char *comma = "";

  if (indent)
    start_column += fprintf (file, "%*s", indent, "");

  if (!flags)
    {
      fprintf (stderr, DEBUG_FMT_S, string, "<none>");
      return;
    }

  start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);

  /* Print the various mask options.  */
  cur_column = start_column;
  for (i = 0; i < num_elements; i++)
    {
      bool invert = opts[i].invert;
      const char *name = opts[i].name;
      const char *no_str = "";
      HOST_WIDE_INT mask = opts[i].mask;
      size_t len = comma_len + prefix_len + strlen (name);

      if (!invert)
	{
	  if ((flags & mask) == 0)
	    {
	      no_str = "no-";
	      len += sizeof ("no-") - 1;
	    }

	  flags &= ~mask;
	}

      else
	{
	  if ((flags & mask) != 0)
	    {
	      no_str = "no-";
	      len += sizeof ("no-") - 1;
	    }

	  flags |= mask;
	}

      cur_column += len;
      if (cur_column > max_column)
	{
	  fprintf (stderr, ", \\\n%*s", (int)start_column, "");
	  cur_column = start_column + len;
	  comma = "";
	}

      fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
      comma = ", ";
      comma_len = sizeof (", ") - 1;
    }

  fputs ("\n", file);
}
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
			  HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
				 &rs6000_opt_masks[0],
				 ARRAY_SIZE (rs6000_opt_masks));
}

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
			      HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
				 &rs6000_builtin_mask_names[0],
				 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */

static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;	/* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;	/* flags that depend on this option.  */
    const char *const name;		/* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR,	OTHER_P9_VECTOR_MASKS,	"power9-vector"	},
    { OPTION_MASK_P8_VECTOR,	OTHER_P8_VECTOR_MASKS,	"power8-vector"	},
    { OPTION_MASK_VSX,		OTHER_VSX_VECTOR_MASKS,	"vsx"		},
  };

  for (i = 0; i < ARRAY_SIZE (flags); i++)
    {
      HOST_WIDE_INT no_flag = flags[i].no_flag;

      if ((rs6000_isa_flags & no_flag) == 0
	  && (rs6000_isa_flags_explicit & no_flag) != 0)
	{
	  HOST_WIDE_INT dep_flags = flags[i].dep_flags;
	  HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
				     & rs6000_isa_flags
				     & dep_flags);

	  if (set_flags)
	    {
	      for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
		if ((set_flags & rs6000_opt_masks[j].mask) != 0)
		  {
		    set_flags &= ~rs6000_opt_masks[j].mask;
		    error ("%<-mno-%s%> turns off %<-m%s%>",
			   flags[i].name,
			   rs6000_opt_masks[j].name);
		  }

	      gcc_assert (!set_flags);
	    }

	  rs6000_isa_flags &= ~dep_flags;
	  ignore_masks |= no_flag | dep_flags;
	}
    }

  return ignore_masks;
}
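
/* Example (illustrative): "-mno-vsx -mpower9-vector" hits the "vsx" entry
   above; assuming OPTION_MASK_P9_VECTOR is part of OTHER_VSX_VECTOR_MASKS
   and was set explicitly, the inner loop reports
     -mno-vsx turns off -mpower9-vector
   and the dependent flags are removed from rs6000_isa_flags.  */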
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
/* Return the clone id of the target we are compiling code for in a target
   clone.  The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
   the priority list for the target clones (ordered from lowest to
   highest).  */

static int
rs6000_clone_priority (tree fndecl)
{
  tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  HOST_WIDE_INT isa_masks;
  int ret = CLONE_DEFAULT;
  tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
  const char *attrs_str = NULL;

  attrs = TREE_VALUE (TREE_VALUE (attrs));
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Return priority zero for default function.  Return the ISA needed for the
     function if it is not the default.  */
  if (strcmp (attrs_str, "default") != 0)
    {
      if (fn_opts == NULL_TREE)
	fn_opts = target_option_default_node;

      if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
	isa_masks = rs6000_isa_flags;
      else
	isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;

      for (ret = CLONE_MAX - 1; ret != 0; ret--)
	if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
	  break;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
	     get_decl_name (fndecl), ret);

  return ret;
}
/* This compares the priority of target features in function DECL1 and DECL2.
   It returns positive value if DECL1 is higher priority, negative value if
   DECL2 is higher priority and 0 if they are the same.  Note, priorities are
   ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0).  */

static int
rs6000_compare_version_priority (tree decl1, tree decl2)
{
  int priority1 = rs6000_clone_priority (decl1);
  int priority2 = rs6000_clone_priority (decl2);
  int ret = priority1 - priority2;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
	     get_decl_name (decl1), get_decl_name (decl2), ret);

  return ret;
}
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
rs6000_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

  struct cgraph_function_version_info *default_version_info = NULL;
  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
	     get_decl_name (fn));

  node = cgraph_node::get (fn);
  gcc_assert (node != NULL);

  node_v = node->function_version ();
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;

  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      const tree decl2 = default_version_info->this_node->decl;
      if (is_function_default_version (decl2))
	break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
	default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  error_at (DECL_SOURCE_LOCATION (default_node->decl),
	    "target_clones attribute needs GLIBC (2.23 and newer) that "
	    "exports hardware capability bits");
#else

  if (targetm.has_ifunc_p ())
    {
      struct cgraph_function_version_info *it_v = NULL;
      struct cgraph_node *dispatcher_node = NULL;
      struct cgraph_function_version_info *dispatcher_version_info = NULL;

      /* Right now, the dispatching is done via ifunc.  */
      dispatch_decl = make_dispatcher_decl (default_node->decl);

      dispatcher_node = cgraph_node::get_create (dispatch_decl);
      gcc_assert (dispatcher_node != NULL);
      dispatcher_node->dispatcher_function = 1;
      dispatcher_version_info
	= dispatcher_node->insert_new_function_version ();
      dispatcher_version_info->next = default_version_info;
      dispatcher_node->definition = 1;

      /* Set the dispatcher for all the versions.  */
      it_v = default_version_info;
      while (it_v != NULL)
	{
	  it_v->dispatcher_resolver = dispatch_decl;
	  it_v = it_v->next;
	}
    }
  else
    {
      error_at (DECL_SOURCE_LOCATION (default_node->decl),
		"multiversioning needs ifunc which is not supported "
		"on this target");
    }
#endif

  return dispatch_decl;
}
/* Make the resolver function decl to dispatch the versions of a multi-
   versioned function, DEFAULT_DECL.  Create an empty basic block in the
   resolver and store the pointer in EMPTY_BB.  Return the decl of the resolver
   function.  */

static tree
make_resolver_func (const tree default_decl,
		    const tree dispatch_decl,
		    basic_block *empty_bb)
{
  /* Make the resolver function static.  The resolver function returns
     void *.  */
  tree decl_name = clone_function_name (default_decl, "resolver");
  const char *resolver_name = IDENTIFIER_POINTER (decl_name);
  tree type = build_function_type_list (ptr_type_node, NULL_TREE);
  tree decl = build_fn_decl (resolver_name, type);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  /* Build result decl and add to function_decl.  */
  tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false,
					   profile_count::uninitialized ());

  cgraph_node::add_new_function (decl, true);
  symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));

  pop_cfun ();

  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  cgraph_node::create_same_body_alias (dispatch_decl, decl);

  return decl;
}
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
   return a pointer to VERSION_DECL if we are running on a machine that
   supports the index CLONE_ISA hardware architecture bits.  This function will
   be called during version dispatch to decide which function version to
   execute.  It returns the basic block at the end, to which more conditions
   can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
		     int clone_isa, basic_block new_bb)
{
  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gimple_seq gseq = bb_seq (new_bb);

  tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
			      build_fold_addr_expr (version_decl));
  tree result_var = create_tmp_var (ptr_type_node);
  gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
  gimple *return_stmt = gimple_build_return (result_var);

  if (clone_isa == CLONE_DEFAULT)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  tree bool_zero = build_int_cst (bool_int_type_node, 0);
  tree cond_var = create_tmp_var (bool_int_type_node);
  tree predicate_decl = rs6000_builtin_decls[(int) RS6000_BUILTIN_CPU_SUPPORTS];
  const char *arg_str = rs6000_clone_map[clone_isa].name;
  tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
  gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
  gimple_call_set_lhs (call_cond_stmt, cond_var);

  gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (call_cond_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, call_cond_stmt);

  gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
					    NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  basic_block bb1 = new_bb;
  edge e12 = split_block (bb1, if_else_stmt);
  basic_block bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  edge e23 = split_block (bb2, return_stmt);
  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  basic_block bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);

  pop_cfun ();

  return bb3;
}
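
/* Shape of the code built above (illustrative sketch; the actual string
   comes from rs6000_clone_map[clone_isa].name, and "arch_3_00" here is only
   a placeholder for whatever that entry holds):

     cond = __builtin_cpu_supports ("arch_3_00");
     if (cond != 0)
       return (void *) &version_decl;
     // otherwise fall through to the next condition block (bb3)
*/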
/* This function generates the dispatch function for multi-versioned functions.
   DISPATCH_DECL is the function which will contain the dispatch logic.
   FNDECLS are the function choices for dispatch, and is a tree chain.
   EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
   code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
			    void *fndecls_p,
			    basic_block *empty_bb)
{
  int ix;
  tree ele;
  vec<tree> *fndecls;
  tree clones[CLONE_MAX];

  if (TARGET_DEBUG_TARGET)
    fputs ("dispatch_function_versions, top\n", stderr);

  gcc_assert (dispatch_decl != NULL
	      && fndecls_p != NULL
	      && empty_bb != NULL);

  /* fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  gcc_assert (fndecls->length () >= 2);

  /* The first version in the vector is the default decl.  */
  memset ((void *) clones, '\0', sizeof (clones));
  clones[CLONE_DEFAULT] = (*fndecls)[0];

  /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
     on the PowerPC (on the x86_64, it is not a NOP).  The builtin function
     __builtin_cpu_supports ensures that the TOC fields are set up by requiring
     a recent glibc.  If we ever need to call __builtin_cpu_init, we would need
     to insert the code here to do the call.  */

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      int priority = rs6000_clone_priority (ele);
      if (!clones[priority])
	clones[priority] = ele;
    }

  for (ix = CLONE_MAX - 1; ix >= 0; ix--)
    if (clones[ix])
      {
	if (TARGET_DEBUG_TARGET)
	  fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
		   ix, get_decl_name (clones[ix]));

	*empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
					 *empty_bb);
      }

  return 0;
}
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
rs6000_generate_version_dispatcher_body (void *node_p)
{
  tree resolver;
  basic_block empty_bb;
  struct cgraph_node *node = (cgraph_node *) node_p;
  struct cgraph_function_version_info *ninfo = node->function_version ();

  if (ninfo->dispatcher_resolver)
    return ninfo->dispatcher_resolver;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->definition = false;

  /* The first version in the chain corresponds to the default version.  */
  ninfo->dispatcher_resolver = resolver
    = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
	     get_decl_name (resolver));

  push_cfun (DECL_STRUCT_FUNCTION (resolver));
  auto_vec<tree, 2> fn_ver_vec;

  for (struct cgraph_function_version_info *vinfo = ninfo->next;
       vinfo;
       vinfo = vinfo->next)
    {
      struct cgraph_node *version = vinfo->this_node;
      /* Check for virtual functions here again, as by this time it should
	 have been determined if this function needs a vtable index or
	 not.  This happens for methods in derived classes that override
	 virtual methods in base classes but are not explicitly marked as
	 virtual.  */
      if (DECL_VINDEX (version->decl))
	sorry ("Virtual function multiversioning not supported");

      fn_ver_vec.safe_push (version->decl);
    }

  dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
  cgraph_edge::rebuild_edges ();
  pop_cfun ();
  return resolver;
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function, but a non-vsx function
	 can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
	  == callee_opts->x_rs6000_isa_flags)
	ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
	     get_decl_name (caller), get_decl_name (callee),
	     (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = reload_completed;

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, reload_completed)
      && ! legitimate_indexed_address_p (addr, reload_completed))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx reg = XEXP (addr, 0);
	  HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
	  rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
	  gcc_assert (REG_P (reg));
	  emit_insn (gen_add3_insn (reg, reg, size_rtx));
	  addr = reg;
	}
      else if (GET_CODE (addr) == PRE_MODIFY)
	{
	  rtx reg = XEXP (addr, 0);
	  rtx expr = XEXP (addr, 1);
	  gcc_assert (REG_P (reg));
	  gcc_assert (GET_CODE (expr) == PLUS);
	  emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
	  addr = reg;
	}

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);

      if (!legitimate_indexed_address_p (addr, reload_completed)
	  && !legitimate_indirect_address_p (addr, reload_completed))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
	{
	  rtx patt = PATTERN (last);

	  if (GET_CODE (patt) == SET)
	    {
	      rtx lhs = XEXP (patt, 0);

	      if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
		return true;
	    }
	}
    }
  return false;
}
/* Expand code to perform a call under the AIX or ELFv2 ABI.  */

void
rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  const bool direct_call_p
    = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
  rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
  rtx toc_load = NULL_RTX;
  rtx toc_restore = NULL_RTX;
  rtx func_addr;
  rtx abi_reg = NULL_RTX;
  rtx call[4];
  int n_call;
  rtx insn;

  /* Handle longcall attributes.  */
  if (INTVAL (cookie) & CALL_LONG)
    func_desc = rs6000_longcall_ref (func_desc);

  /* Handle indirect calls.  */
  if (GET_CODE (func_desc) != SYMBOL_REF
      || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
    {
      /* Save the TOC into its reserved slot before the call,
	 and prepare to restore it after the call.  */
      rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
      rtx stack_toc_mem = gen_frame_mem (Pmode,
					 gen_rtx_PLUS (Pmode, stack_ptr,
						       stack_toc_offset));
      rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
					     gen_rtvec (1, stack_toc_offset),
					     UNSPEC_TOCSLOT);
      toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);

      /* Can we optimize saving the TOC in the prologue or
	 do we need to do it at every call?  */
      if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
	cfun->machine->save_toc_in_prologue = true;
      else
	{
	  MEM_VOLATILE_P (stack_toc_mem) = 1;
	  emit_move_insn (stack_toc_mem, toc_reg);
	}

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* A function pointer in the ELFv2 ABI is just a plain address, but
	     the ABI requires it to be loaded into r12 before the call.  */
	  func_addr = gen_rtx_REG (Pmode, 12);
	  emit_move_insn (func_addr, func_desc);
	  abi_reg = func_addr;
	}
      else
	{
	  /* A function pointer under AIX is a pointer to a data area whose
	     first word contains the actual address of the function, whose
	     second word contains a pointer to its TOC, and whose third word
	     contains a value to place in the static chain register (r11).
	     Note that if we load the static chain, our "trampoline" need
	     not have any executable code.  */

	  /* Load up address of the actual function.  */
	  func_desc = force_reg (Pmode, func_desc);
	  func_addr = gen_reg_rtx (Pmode);
	  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

	  /* Prepare to load the TOC of the called function.  Note that the
	     TOC load must happen immediately before the actual call so
	     that unwinding the TOC registers works correctly.  See the
	     comment in frob_update_context.  */
	  rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
	  rtx func_toc_mem = gen_rtx_MEM (Pmode,
					  gen_rtx_PLUS (Pmode, func_desc,
							func_toc_offset));
	  toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);

	  /* If we have a static chain, load it up.  But, if the call was
	     originally direct, the 3rd word has not been written since no
	     trampoline has been built, so we ought not to load it, lest we
	     override a static chain value.  */
	  if (!direct_call_p
	      && TARGET_POINTERS_TO_NESTED_FUNCTIONS
	      && !chain_already_loaded (get_current_sequence ()->next->last))
	    {
	      rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
	      rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
	      rtx func_sc_mem = gen_rtx_MEM (Pmode,
					     gen_rtx_PLUS (Pmode, func_desc,
							   func_sc_offset));
	      emit_move_insn (sc_reg, func_sc_mem);
	      abi_reg = sc_reg;
	    }
	}
    }
  else
    {
      /* Direct calls use the TOC: for local calls, the callee will
	 assume the TOC register is set; for non-local calls, the
	 PLT stub needs the TOC register.  */
      abi_reg = toc_reg;
      func_addr = func_desc;
    }

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);
  n_call = 1;

  if (toc_load)
    call[n_call++] = toc_load;
  if (toc_restore)
    call[n_call++] = toc_restore;
  call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
  insn = emit_call_insn (insn);

  /* Mention all registers defined by the ABI to hold information
     as uses in CALL_INSN_FUNCTION_USAGE.  */
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}
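
/* Resulting pattern (illustrative sketch of the PARALLEL built above for an
   indirect AIX-style call; offsets and modes depend on Pmode):

     (parallel [(set (reg value) (call (mem:SI func_addr) flag))
		(use (mem:P (plus:P func_desc <ptr-size>)))	;; toc_load
		(set (reg:P 2) (unspec:P [off] UNSPEC_TOCSLOT))	;; toc_restore
		(clobber (reg:P lr))])
*/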
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
    {
      cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);

  /* Make sure the hard reg set doesn't include r2, which was possibly added
     via PIC_OFFSET_TABLE_REGNUM.  */
  if (TARGET_TOC)
    remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;

  /* Optimize AND of 0/0xffffffff and IOR/XOR of 0.  */
  if (op2 && GET_CODE (op2) == CONST_INT
      && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      HOST_WIDE_INT value = INTVAL (op2) & mask;

      /* Optimize AND of 0 to just set 0.  Optimize AND of -1 to be a move.  */
      if (code == AND)
	{
	  if (value == 0)
	    {
	      emit_insn (gen_rtx_SET (dest, const0_rtx));
	      return;
	    }

	  else if (value == mask)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}

      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
      else if (code == IOR || code == XOR)
	{
	  if (value == 0)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}
    }

  if (code == AND && mode == SImode
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      emit_insn (gen_andsi3 (dest, op1, op2));
      return;
    }

  if (complement_op1_p)
    op1 = gen_rtx_NOT (mode, op1);

  if (complement_op2_p)
    op2 = gen_rtx_NOT (mode, op2);

  /* For canonical RTL, if only one arm is inverted it is the first.  */
  if (!complement_op1_p && complement_op2_p)
    std::swap (op1, op2);

  bool_rtx = ((code == NOT)
	      ? gen_rtx_NOT (mode, op1)
	      : gen_rtx_fmt_ee (code, mode, op1, op2));

  if (complement_final_p)
    bool_rtx = gen_rtx_NOT (mode, bool_rtx);

  emit_insn (gen_rtx_SET (dest, bool_rtx));
}
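/* A worked example of the constant special cases above: in SImode,
   "dest = op1 & 0" becomes a load of zero, while "dest = op1 & 0xffffffff"
   and "dest = op1 | 0" degenerate into plain register moves, so no logical
   instruction is emitted at all for those values.  */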
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;

  op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
  op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
  op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
  op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);

  if (code == NOT)
    op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
  else
    {
      if (GET_CODE (operands[2]) != CONST_INT)
	{
	  op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
	  op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
	}
      else
	{
	  HOST_WIDE_INT value = INTVAL (operands[2]);
	  HOST_WIDE_INT value_hi_lo[2];

	  gcc_assert (!complement_final_p);
	  gcc_assert (!complement_op1_p);
	  gcc_assert (!complement_op2_p);

	  value_hi_lo[hi] = value >> 32;
	  value_hi_lo[lo] = value & lower_32bits;

	  for (i = 0; i < 2; i++)
	    {
	      HOST_WIDE_INT sub_value = value_hi_lo[i];

	      if (sub_value & sign_bit)
		sub_value |= upper_32bits;

	      op2_hi_lo[i] = GEN_INT (sub_value);

	      /* If this is an AND instruction, check to see if we need to load
		 the value in a register.  */
	      if (code == AND && sub_value != -1 && sub_value != 0
		  && !and_operand (op2_hi_lo[i], SImode))
		op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
	    }
	}
    }

  for (i = 0; i < 2; i++)
    {
      /* Split large IOR/XOR operations.  */
      if ((code == IOR || code == XOR)
	  && GET_CODE (op2_hi_lo[i]) == CONST_INT
	  && !complement_final_p
	  && !complement_op1_p
	  && !complement_op2_p
	  && !logical_const_operand (op2_hi_lo[i], SImode))
	{
	  HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
	  HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
	  HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
	  rtx tmp = gen_reg_rtx (SImode);

	  /* Make sure the constant is sign extended.  */
	  if ((hi_16bits & sign_bit) != 0)
	    hi_16bits |= upper_32bits;

	  rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
				      code, SImode, false, false, false);

	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }
}
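/* A sketch of the splitting above: on a 32-bit target, a DImode
   "dest = op1 ^ 0x12345678" is handled as two SImode halves.  The low-half
   constant is not a 16-bit logical constant, so it is split again into an
   XORIS/XORI pair:

	xoris tmp,op1_lo,0x1234
	xori  dest_lo,tmp,0x5678

   while the high half is XORed with 0 and degenerates into a move.  */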
/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

void
rs6000_split_logical (rtx operands[3],
		      enum rtx_code code,
		      bool complement_final_p,
		      bool complement_op1_p,
		      bool complement_op2_p)
{
  machine_mode mode = GET_MODE (operands[0]);
  machine_mode sub_mode;
  rtx op0, op1, op2;
  int sub_size, regno0, regno1, nregs, i;

  /* If this is DImode, use the specialized version that can run before
     register allocation.  */
  if (mode == DImode && !TARGET_POWERPC64)
    {
      rs6000_split_logical_di (operands, code, complement_final_p,
			       complement_op1_p, complement_op2_p);
      return;
    }

  op0 = operands[0];
  op1 = operands[1];
  op2 = (code == NOT) ? NULL_RTX : operands[2];
  sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
  sub_size = GET_MODE_SIZE (sub_mode);
  regno0 = REGNO (op0);
  regno1 = REGNO (op1);

  gcc_assert (reload_completed);
  gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
  gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));

  nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
  gcc_assert (nregs > 1);

  if (op2 && REG_P (op2))
    gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));

  for (i = 0; i < nregs; i++)
    {
      int offset = i * sub_size;
      rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
      rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
      rtx sub_op2 = ((code == NOT)
		     ? NULL_RTX
		     : simplify_subreg (sub_mode, op2, mode, offset));

      rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
				  complement_final_p, complement_op1_p,
				  complement_op2_p);
    }
}
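/* For example (a sketch): after reload on a 64-bit target, a TImode AND
   occupies two adjacent GPRs, so the loop above emits two independent
   DImode AND operations, one per subword register.  */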
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!base_reg_operand (target, GET_MODE (target)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND
      || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
    mem = XEXP (mem, 0);

  if (!MEM_P (mem))
    return false;

  if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    return false;

  /* Validate that the register used to load the high value is either the
     register being loaded, or we can safely replace its use.

     This function is only called from the peephole2 pass and we assume that
     there are 2 instructions in the peephole (addis and load), so we want to
     check if the target register was not used in the memory address and the
     register to hold the addis result is dead after the peephole.  */
  if (REGNO (addis_reg) != REGNO (target))
    {
      if (reg_mentioned_p (target, mem))
	return false;

      if (!peep2_reg_dead_p (2, addis_reg))
	return false;

      /* If the target register being loaded is the stack pointer, we must
	 avoid loading any other value into it, even temporarily.  */
      if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
	return false;
    }

  base_reg = XEXP (addr, 0);
  return REGNO (addis_reg) == REGNO (base_reg);
}
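/* An illustrative candidate for the power8 GPR load fusion peephole:

	addis 9,2,.LC0@toc@ha
	lwz 9,.LC0@toc@l(9)

   Here the addis target, the load base register, and the loaded register
   are all r9, which satisfies the checks above.  */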
/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */

void
expand_fusion_gpr_load (rtx *operands)
{
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == ZERO_EXTEND
      || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  /* The fused load is always done as a zero extension; any sign extension
     is emitted separately below.  */
  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_GPR);
  emit_insn (gen_rtx_SET (target, new_mem));

  if (extend == SIGN_EXTEND)
    {
      int sub_off = ((BYTES_BIG_ENDIAN)
		     ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
		     : 0);
      rtx sign_reg
	= simplify_subreg (target_mode, target, extend_mode, sub_off);

      emit_insn (gen_rtx_SET (target,
			      gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
    }
}
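/* A sketch of the sign-extension handling above: for a HImode load that
   sign-extends to SImode, the fused sequence performs a zero-extending
   load into the target register, and a separate (sign_extend ...) insn
   of the loaded subword is emitted afterwards.  */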
/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
		   const char *mode_name)
{
  rtx fuse_ops[10];
  char insn_template[80];
  const char *addis_str = NULL;
  const char *comment_str = ASM_COMMENT_START;

  if (*comment_str == ' ')
    comment_str++;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);		/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);		/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
	   comment, mode_name);
  output_asm_insn (insn_template, fuse_ops);
}
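/* For example, with a TOC-relative high part on ELF the template chosen
   above prints as something like:

	addis 9,2,sym@toc@ha		# gpr load fusion, type int

   where the trailing comment is assembled from COMMENT and MODE_NAME.  */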
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

void
emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
			const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_store_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);
}
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

static rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.
   Addresses created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
	(unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
/* Return a string to fuse an addis instruction with a gpr load to the same
   register that we loaded up the addis instruction.  The address that is used
   is the logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register
   allocation:
	(unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  const char *mode_name = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      mode_name = "char";
      load_str = "lbz";
      break;

    case E_HImode:
      mode_name = "short";
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      mode_name = (mode == SFmode) ? "float" : "int";
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      mode_name = (mode == DFmode) ? "double" : "long";
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,		/* register set via addis.  */
	     rtx addis_value,		/* addis value.  */
	     rtx dest,			/* destination (memory or register).  */
	     rtx src)			/* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
	return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
	return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
	return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
	return CONSTANT_P (offset);
    }

  return false;
}
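/* An illustrative power9 candidate accepted above:

	addis 9,2,sym@toc@ha
	lfd 1,sym@toc@l(9)

   Unlike the power8 GPR form, the destination (f1 here) need not be the
   same register as the addis target.  */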
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion load sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);
}
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion store sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);
}
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
	load_string = "lfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
	load_string = "lxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  load_string = "lbz";
	  break;
	case E_HImode:
	  load_string = "lhz";
	  break;
	case E_SImode:
	case E_SFmode:
	  load_string = "lwz";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  load_string = "ld";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	store_string = "stfs";
      else if (mode == DFmode)
	store_string = "stfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
	store_string = "stxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  store_string = "stb";
	  break;
	case E_HImode:
	  store_string = "sth";
	  break;
	case E_SImode:
	case E_SFmode:
	  store_string = "stw";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  store_string = "std";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
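/* A note on the masks above (a sketch based on the 64-bit FPSCR image
   returned by mffs): in the low word, 0x3 selects the rounding mode (RN)
   and 0x4 is the non-IEEE mode bit (NI), so the hold mask 0x...00000007
   preserves exactly those fields while clearing the exception status and
   enable bits.  */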
/* Convert the elements of two V2DI registers SRC1 and SRC2 into a V4SF
   register DST, using signed or unsigned conversion according to
   SIGNED_CONVERT.  */

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
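/* A sketch of the dataflow above: with src1 = {a0, a1} and src2 = {b0, b1},
   the xxpermdi instructions build {a0, b0} and {a1, b1}; after the
   doubleword-to-single conversions, the vmrgew merge leaves
   dst = {f(a0), f(a1), f(b0), f(b1)}, matching the element order of the
   vector float2 operation.  */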
/* Convert the elements of two V2DF registers SRC1 and SRC2 into a V4SI
   register DST, using signed or unsigned conversion according to
   SIGNED_CONVERT.  */

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
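/* The dataflow mirrors rs6000_generate_float2_code (a sketch): {a0, a1}
   and {b0, b1} become dst = {(int) a0, (int) a1, (int) b0, (int) b1},
   with the unsigned variants using the xvcvdpuxws path.  */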
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"