/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name.  */
  unsigned int mask;		/* mask bits to set.  */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 }
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE through FPR_REG_TYPE are ordered so that we can use a simple
   range check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
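
/* For example (an illustrative note, not from the original source):
   since GPR_REG_TYPE, VSX_REG_TYPE, ALTIVEC_REG_TYPE, and FPR_REG_TYPE
   are declared consecutively above, IS_STD_REG_TYPE (ALTIVEC_REG_TYPE)
   reduces to a single range check and yields true, while
   IS_STD_REG_TYPE (CR_REG_TYPE) yields false.  Likewise
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false, since the VSX..FPR range
   deliberately excludes the general purpose registers.  */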
/* Register classes we care about in secondary reload or when checking
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes
   mapping into real registers, and skip the ANY class, which is just an OR
   of the bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
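
/* Worked example (an illustrative note, not from the original source):
   a mode that is valid in a register class and supports both reg+reg
   and reg+offset addressing carries the mask
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   == 0x01 | 0x04 | 0x08 == 0x0d; the helper predicates below simply
   test individual bits of such masks.  */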
/* For each mode, the reload insns to use and the masks of valid addressing
   modes for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */
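
/* Note (an illustrative aside, not from the original source): the
   COSTS_N_INSNS macro used throughout the tables below is defined in
   GCC's rtl.h and scales an instruction count into the unit RTX-cost
   space, so COSTS_N_INSNS (1) is the cost of a single add and
   COSTS_N_INSNS (17) models an operation seventeen times as costly.  */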
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
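
/* Illustrative note (not from the original source): this is the classic
   "X macro" pattern.  rs6000-builtin.def invokes RS6000_BUILTIN_* once
   per builtin, so defining each macro to expand to an initializer row
   and then including the .def file materializes one
   rs6000_builtin_info_type entry per builtin; the #undefs above free
   the macro names so other parts of the compiler can re-define them
   for different expansions of the same .def file.  */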
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr",
      /* SPE High registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
  "%rh8",  "%rh9",  "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
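
/* Illustrative usage of two of these attributes in user code (a sketch,
   not part of this file):

     typedef float vecf __attribute__ ((altivec (vector__)));
     int far_func (void) __attribute__ ((longcall));

   "altivec" turns a type into an AltiVec vector type, and "longcall"
   forces calls to the function to go through a pointer, so the callee
   may be farther away than a direct branch can reach.  */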
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
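
/* Worked example (an illustrative note, not from the original source):
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 >> 0
   == 0x80000000 for %v0, while the last AltiVec register %v31 maps to
   0x80000000 >> 31 == 0x00000001, matching the VRSAVE convention that
   %v0 occupies the most significant bit.  */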
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1701 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1702 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1704 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1705 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1706 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1707 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1708 rs6000_builtin_support_vector_misalignment
1709 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1710 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1711 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1712 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1713 rs6000_builtin_vectorization_cost
1714 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1715 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1716 rs6000_preferred_simd_mode
1717 #undef TARGET_VECTORIZE_INIT_COST
1718 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1719 #undef TARGET_VECTORIZE_ADD_STMT_COST
1720 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1721 #undef TARGET_VECTORIZE_FINISH_COST
1722 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1723 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1724 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1726 #undef TARGET_INIT_BUILTINS
1727 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1728 #undef TARGET_BUILTIN_DECL
1729 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1731 #undef TARGET_FOLD_BUILTIN
1732 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1733 #undef TARGET_GIMPLE_FOLD_BUILTIN
1734 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1736 #undef TARGET_EXPAND_BUILTIN
1737 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1739 #undef TARGET_MANGLE_TYPE
1740 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1742 #undef TARGET_INIT_LIBFUNCS
1743 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1745 #if TARGET_MACHO
1746 #undef TARGET_BINDS_LOCAL_P
1747 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1748 #endif
1750 #undef TARGET_MS_BITFIELD_LAYOUT_P
1751 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1753 #undef TARGET_ASM_OUTPUT_MI_THUNK
1754 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1756 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1757 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1759 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1760 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1762 #undef TARGET_REGISTER_MOVE_COST
1763 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1764 #undef TARGET_MEMORY_MOVE_COST
1765 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1766 #undef TARGET_CANNOT_COPY_INSN_P
1767 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1768 #undef TARGET_RTX_COSTS
1769 #define TARGET_RTX_COSTS rs6000_rtx_costs
1770 #undef TARGET_ADDRESS_COST
1771 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1773 #undef TARGET_DWARF_REGISTER_SPAN
1774 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1776 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1777 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1779 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1780 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1782 #undef TARGET_PROMOTE_FUNCTION_MODE
1783 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1785 #undef TARGET_RETURN_IN_MEMORY
1786 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1788 #undef TARGET_RETURN_IN_MSB
1789 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1791 #undef TARGET_SETUP_INCOMING_VARARGS
1792 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1794 /* Always strict argument naming on rs6000. */
1795 #undef TARGET_STRICT_ARGUMENT_NAMING
1796 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1797 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1798 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1799 #undef TARGET_SPLIT_COMPLEX_ARG
1800 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1801 #undef TARGET_MUST_PASS_IN_STACK
1802 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1803 #undef TARGET_PASS_BY_REFERENCE
1804 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1805 #undef TARGET_ARG_PARTIAL_BYTES
1806 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1807 #undef TARGET_FUNCTION_ARG_ADVANCE
1808 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1809 #undef TARGET_FUNCTION_ARG
1810 #define TARGET_FUNCTION_ARG rs6000_function_arg
1811 #undef TARGET_FUNCTION_ARG_BOUNDARY
1812 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1814 #undef TARGET_BUILD_BUILTIN_VA_LIST
1815 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1817 #undef TARGET_EXPAND_BUILTIN_VA_START
1818 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1820 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1821 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1823 #undef TARGET_EH_RETURN_FILTER_MODE
1824 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1826 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1827 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1829 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1830 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1832 #undef TARGET_FLOATN_MODE
1833 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1835 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1836 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1838 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1839 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1841 #undef TARGET_MD_ASM_ADJUST
1842 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1844 #undef TARGET_OPTION_OVERRIDE
1845 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1847 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1848 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1849 rs6000_builtin_vectorized_function
1851 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1852 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1853 rs6000_builtin_md_vectorized_function
1855 #undef TARGET_STACK_PROTECT_GUARD
1856 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1858 #if !TARGET_MACHO
1859 #undef TARGET_STACK_PROTECT_FAIL
1860 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1861 #endif
1863 #ifdef HAVE_AS_TLS
1864 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1865 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1866 #endif
1868 /* Use a 32-bit anchor range. This leads to sequences like:
1870 addis tmp,anchor,high
1871 add dest,tmp,low
1873 where tmp itself acts as an anchor, and can be shared between
1874 accesses to the same 64k page. */
1875 #undef TARGET_MIN_ANCHOR_OFFSET
1876 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1877 #undef TARGET_MAX_ANCHOR_OFFSET
1878 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
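/* Worked example (illustrative): splitting the offset 0x12345678 gives
   high == 0x1234 and low == 0x5678, so the sequence above becomes

       addis tmp,anchor,0x1234
       addi  dest,tmp,0x5678

   When low >= 0x8000, the high part must be incremented by one to
   compensate for addi sign-extending its 16-bit immediate.  */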
1879 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1880 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1881 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1882 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1884 #undef TARGET_BUILTIN_RECIPROCAL
1885 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1887 #undef TARGET_EXPAND_TO_RTL_HOOK
1888 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1890 #undef TARGET_INSTANTIATE_DECLS
1891 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1893 #undef TARGET_SECONDARY_RELOAD
1894 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1896 #undef TARGET_LEGITIMATE_ADDRESS_P
1897 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1899 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1900 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1902 #undef TARGET_LRA_P
1903 #define TARGET_LRA_P rs6000_lra_p
1905 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1906 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1908 #undef TARGET_CAN_ELIMINATE
1909 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1911 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1912 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1914 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1915 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1917 #undef TARGET_TRAMPOLINE_INIT
1918 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1920 #undef TARGET_FUNCTION_VALUE
1921 #define TARGET_FUNCTION_VALUE rs6000_function_value
1923 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1924 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1926 #undef TARGET_OPTION_SAVE
1927 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1929 #undef TARGET_OPTION_RESTORE
1930 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1932 #undef TARGET_OPTION_PRINT
1933 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1935 #undef TARGET_CAN_INLINE_P
1936 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1938 #undef TARGET_SET_CURRENT_FUNCTION
1939 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1941 #undef TARGET_LEGITIMATE_CONSTANT_P
1942 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1944 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1945 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1947 #undef TARGET_CAN_USE_DOLOOP_P
1948 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1950 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1951 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1953 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1954 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1955 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1956 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1957 #undef TARGET_UNWIND_WORD_MODE
1958 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1960 #undef TARGET_OFFLOAD_OPTIONS
1961 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1963 #undef TARGET_C_MODE_FOR_SUFFIX
1964 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1966 #undef TARGET_INVALID_BINARY_OP
1967 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1969 #undef TARGET_OPTAB_SUPPORTED_P
1970 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1972 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1973 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1976 /* Processor table. */
1977 struct rs6000_ptt
1979 const char *const name; /* Canonical processor name. */
1980 const enum processor_type processor; /* Processor type enum value. */
1981 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1984 static struct rs6000_ptt const processor_target_table[] =
1986 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1987 #include "rs6000-cpus.def"
1988 #undef RS6000_CPU
1991 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1992 name is invalid. */
1994 static int
1995 rs6000_cpu_name_lookup (const char *name)
1997 size_t i;
1999 if (name != NULL)
2001 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2002 if (! strcmp (name, processor_target_table[i].name))
2003 return (int)i;
2006 return -1;
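/* Hypothetical usage sketch (not part of the original flow; "power8" is
   only an example name, the real entries come from rs6000-cpus.def):

       int idx = rs6000_cpu_name_lookup ("power8");
       if (idx >= 0)
         flags = processor_target_table[idx].target_enable;

   A negative return means the -mcpu=/-mtune= argument was not a known
   processor name.  */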
2010 /* Return number of consecutive hard regs needed starting at reg REGNO
2011 to hold something of mode MODE.
2012 This is ordinarily the length in words of a value of mode MODE
2013 but can be less for certain modes in special long registers.
2015 For the SPE, GPRs are 64 bits but only 32 bits are visible in
2016 scalar instructions. The upper 32 bits are only available to the
2017 SIMD instructions.
2019 POWER and PowerPC GPRs hold 32 bits worth;
2020 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2022 static int
2023 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2025 unsigned HOST_WIDE_INT reg_size;
2027 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2028 128-bit floating point that can go in vector registers, which has VSX
2029 memory addressing. */
2030 if (FP_REGNO_P (regno))
2031 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2032 ? UNITS_PER_VSX_WORD
2033 : UNITS_PER_FP_WORD);
2035 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2036 reg_size = UNITS_PER_SPE_WORD;
2038 else if (ALTIVEC_REGNO_P (regno))
2039 reg_size = UNITS_PER_ALTIVEC_WORD;
2041 /* The value returned for SCmode in the E500 double case is 2 for
2042 ABI compatibility; storing an SCmode value in a single register
2043 would require function_arg and rs6000_spe_function_arg to handle
2044 SCmode so as to pass the value correctly in a pair of
2045 registers. */
2046 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
2047 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
2048 reg_size = UNITS_PER_FP_WORD;
2050 else
2051 reg_size = UNITS_PER_WORD;
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
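/* Worked example of the rounding division above (illustrative): a
   16-byte V4SImode value held in 8-byte GPRs needs
   (16 + 8 - 1) / 8 == 2 registers, while the same mode in a 16-byte
   AltiVec register needs (16 + 16 - 1) / 16 == 1.  */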
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2066 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2067 register combinations, and use PTImode where we need to deal with quad
2068 word memory operations. Don't allow quad words in the argument or frame
2069 pointer registers, just registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
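/* E.g. (illustrative): PTImode starting at r4 occupies r4..r5 and is
   accepted; starting at r5 it fails the even-register check, and
   starting at r31 the pair would spill past the GPR file.  */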
2075 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2076 implementations. Don't allow an item to be split between a FP register
2077 and an Altivec register. Allow TImode in all VSX registers if the user
2078 asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || (TARGET_VSX_TIMODE && mode == TImode)
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2089 if (ALTIVEC_REGNO_P (regno))
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2094 return ALTIVEC_REGNO_P (last_regno);
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2120 if (TARGET_VSX_SMALL_INTEGER)
2122 if (mode == SImode)
2123 return 1;
2125 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
2126 return 1;
2130 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2131 && PAIRED_VECTOR_MODE (mode))
2132 return 1;
2134 return 0;
2137 /* The CR register can only hold CC modes. */
2138 if (CR_REGNO_P (regno))
2139 return GET_MODE_CLASS (mode) == MODE_CC;
2141 if (CA_REGNO_P (regno))
2142 return mode == Pmode || mode == SImode;
2145 /* AltiVec modes can go only in AltiVec registers. */
2145 if (ALTIVEC_REGNO_P (regno))
2146 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2147 || mode == V1TImode);
2149 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2150 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2151 return 1;
2153 /* We cannot put non-VSX TImode or PTImode anywhere except general
2154 registers, and the value must fit within the register set. */
2156 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2159 /* Print interesting facts about registers. */
2160 static void
2161 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2163 int r, m;
2165 for (r = first_regno; r <= last_regno; ++r)
2167 const char *comma = "";
2168 int len;
2170 if (first_regno == last_regno)
2171 fprintf (stderr, "%s:\t", reg_name);
2172 else
2173 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2175 len = 8;
2176 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2177 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2179 if (len > 70)
2181 fprintf (stderr, ",\n\t");
2182 len = 8;
2183 comma = "";
2186 if (rs6000_hard_regno_nregs[m][r] > 1)
2187 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2188 rs6000_hard_regno_nregs[m][r]);
2189 else
2190 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2192 comma = ", ";
2195 if (call_used_regs[r])
2197 if (len > 70)
2199 fprintf (stderr, ",\n\t");
2200 len = 8;
2201 comma = "";
2204 len += fprintf (stderr, "%s%s", comma, "call-used");
2205 comma = ", ";
2208 if (fixed_regs[r])
2210 if (len > 70)
2212 fprintf (stderr, ",\n\t");
2213 len = 8;
2214 comma = "";
2217 len += fprintf (stderr, "%s%s", comma, "fixed");
2218 comma = ", ";
2221 if (len > 70)
2223 fprintf (stderr, ",\n\t");
2224 comma = "";
2227 len += fprintf (stderr, "%sreg-class = %s", comma,
2228 reg_class_names[(int)rs6000_regno_regclass[r]]);
2229 comma = ", ";
2231 if (len > 70)
2233 fprintf (stderr, ",\n\t");
2234 comma = "";
2237 fprintf (stderr, "%sregno = %d\n", comma, r);
2241 static const char *
2242 rs6000_debug_vector_unit (enum rs6000_vector v)
2244 const char *ret;
2246 switch (v)
2248 case VECTOR_NONE: ret = "none"; break;
2249 case VECTOR_ALTIVEC: ret = "altivec"; break;
2250 case VECTOR_VSX: ret = "vsx"; break;
2251 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2252 case VECTOR_PAIRED: ret = "paired"; break;
2253 case VECTOR_SPE: ret = "spe"; break;
2254 case VECTOR_OTHER: ret = "other"; break;
2255 default: ret = "unknown"; break;
2258 return ret;
2261 /* Inner function printing just the address mask for a particular reload
2262 register class. */
2263 DEBUG_FUNCTION char *
2264 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2266 static char ret[8];
2267 char *p = ret;
2269 if ((mask & RELOAD_REG_VALID) != 0)
2270 *p++ = 'v';
2271 else if (keep_spaces)
2272 *p++ = ' ';
2274 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2275 *p++ = 'm';
2276 else if (keep_spaces)
2277 *p++ = ' ';
2279 if ((mask & RELOAD_REG_INDEXED) != 0)
2280 *p++ = 'i';
2281 else if (keep_spaces)
2282 *p++ = ' ';
2284 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2285 *p++ = 'O';
2286 else if ((mask & RELOAD_REG_OFFSET) != 0)
2287 *p++ = 'o';
2288 else if (keep_spaces)
2289 *p++ = ' ';
2291 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2292 *p++ = '+';
2293 else if (keep_spaces)
2294 *p++ = ' ';
2296 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2297 *p++ = '+';
2298 else if (keep_spaces)
2299 *p++ = ' ';
2301 if ((mask & RELOAD_REG_AND_M16) != 0)
2302 *p++ = '&';
2303 else if (keep_spaces)
2304 *p++ = ' ';
2306 *p = '\0';
2308 return ret;
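/* Example output (illustrative): a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set prints as "vio" without
   keep_spaces, or as "v io   " with keep_spaces padding the unused
   positions.  */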
2311 /* Print the address masks in a human readable fashion. */
2312 DEBUG_FUNCTION void
2313 rs6000_debug_print_mode (ssize_t m)
2315 ssize_t rc;
2316 int spaces = 0;
2317 bool fuse_extra_p;
2319 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2320 for (rc = 0; rc < N_RELOAD_REG; rc++)
2321 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2322 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2324 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2325 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2326 fprintf (stderr, " Reload=%c%c",
2327 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2328 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2329 else
2330 spaces += sizeof (" Reload=sl") - 1;
2332 if (reg_addr[m].scalar_in_vmx_p)
2334 fprintf (stderr, "%*s Upper=y", spaces, "");
2335 spaces = 0;
2337 else
2338 spaces += sizeof (" Upper=y") - 1;
2340 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2341 || reg_addr[m].fused_toc);
2342 if (!fuse_extra_p)
2344 for (rc = 0; rc < N_RELOAD_REG; rc++)
2346 if (rc != RELOAD_REG_ANY)
2348 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2350 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2351 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2352 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2354 fuse_extra_p = true;
2355 break;
2361 if (fuse_extra_p)
2363 fprintf (stderr, "%*s Fuse:", spaces, "");
2364 spaces = 0;
2366 for (rc = 0; rc < N_RELOAD_REG; rc++)
2368 if (rc != RELOAD_REG_ANY)
2370 char load, store;
2372 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2373 load = 'l';
2374 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2375 load = 'L';
2376 else
2377 load = '-';
2379 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2380 store = 's';
2381 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2382 store = 'S';
2383 else
2384 store = '-';
2386 if (load == '-' && store == '-')
2387 spaces += 5;
2388 else
2390 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2391 reload_reg_map[rc].name[0], load, store);
2392 spaces = 0;
2397 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2399 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2400 spaces = 0;
2402 else
2403 spaces += sizeof (" P8gpr") - 1;
2405 if (reg_addr[m].fused_toc)
2407 fprintf (stderr, "%*sToc", (spaces + 1), "");
2408 spaces = 0;
2410 else
2411 spaces += sizeof (" Toc") - 1;
2413 else
2414 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2416 if (rs6000_vector_unit[m] != VECTOR_NONE
2417 || rs6000_vector_mem[m] != VECTOR_NONE)
2419 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2420 spaces, "",
2421 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2422 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2425 fputs ("\n", stderr);
2428 #define DEBUG_FMT_ID "%-32s= "
2429 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2430 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2431 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
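/* Illustrative expansion: fprintf (stderr, DEBUG_FMT_D, "tls_size", 13)
   prints "tls_size" left-justified in a 32-column field followed by
   "= 13", keeping the -mdebug=reg output aligned.  */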
2433 /* Print various interesting information with -mdebug=reg. */
2434 static void
2435 rs6000_debug_reg_global (void)
2437 static const char *const tf[2] = { "false", "true" };
2438 const char *nl = (const char *)0;
2439 int m;
2440 size_t m1, m2, v;
2441 char costly_num[20];
2442 char nop_num[20];
2443 char flags_buffer[40];
2444 const char *costly_str;
2445 const char *nop_str;
2446 const char *trace_str;
2447 const char *abi_str;
2448 const char *cmodel_str;
2449 struct cl_target_option cl_opts;
2451 /* Modes we want tieable information on. */
2452 static const machine_mode print_tieable_modes[] = {
2453 QImode,
2454 HImode,
2455 SImode,
2456 DImode,
2457 TImode,
2458 PTImode,
2459 SFmode,
2460 DFmode,
2461 TFmode,
2462 IFmode,
2463 KFmode,
2464 SDmode,
2465 DDmode,
2466 TDmode,
2467 V8QImode,
2468 V4HImode,
2469 V2SImode,
2470 V16QImode,
2471 V8HImode,
2472 V4SImode,
2473 V2DImode,
2474 V1TImode,
2475 V32QImode,
2476 V16HImode,
2477 V8SImode,
2478 V4DImode,
2479 V2TImode,
2480 V2SFmode,
2481 V4SFmode,
2482 V2DFmode,
2483 V8SFmode,
2484 V4DFmode,
2485 CCmode,
2486 CCUNSmode,
2487 CCEQmode,
2490 /* Virtual regs we are interested in. */
2491 const static struct {
2492 int regno; /* register number. */
2493 const char *name; /* register name. */
2494 } virtual_regs[] = {
2495 { STACK_POINTER_REGNUM, "stack pointer:" },
2496 { TOC_REGNUM, "toc: " },
2497 { STATIC_CHAIN_REGNUM, "static chain: " },
2498 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2499 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2500 { ARG_POINTER_REGNUM, "arg pointer: " },
2501 { FRAME_POINTER_REGNUM, "frame pointer:" },
2502 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2503 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2504 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2505 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2506 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2507 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2508 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2509 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2510 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2513 fputs ("\nHard register information:\n", stderr);
2514 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2515 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2516 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2517 LAST_ALTIVEC_REGNO,
2518 "vs");
2519 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2520 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2521 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2522 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2523 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2524 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2525 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2526 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2528 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2529 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2530 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2532 fprintf (stderr,
2533 "\n"
2534 "d reg_class = %s\n"
2535 "f reg_class = %s\n"
2536 "v reg_class = %s\n"
2537 "wa reg_class = %s\n"
2538 "wb reg_class = %s\n"
2539 "wd reg_class = %s\n"
2540 "we reg_class = %s\n"
2541 "wf reg_class = %s\n"
2542 "wg reg_class = %s\n"
2543 "wh reg_class = %s\n"
2544 "wi reg_class = %s\n"
2545 "wj reg_class = %s\n"
2546 "wk reg_class = %s\n"
2547 "wl reg_class = %s\n"
2548 "wm reg_class = %s\n"
2549 "wo reg_class = %s\n"
2550 "wp reg_class = %s\n"
2551 "wq reg_class = %s\n"
2552 "wr reg_class = %s\n"
2553 "ws reg_class = %s\n"
2554 "wt reg_class = %s\n"
2555 "wu reg_class = %s\n"
2556 "wv reg_class = %s\n"
2557 "ww reg_class = %s\n"
2558 "wx reg_class = %s\n"
2559 "wy reg_class = %s\n"
2560 "wz reg_class = %s\n"
2561 "wA reg_class = %s\n"
2562 "wH reg_class = %s\n"
2563 "wI reg_class = %s\n"
2564 "wJ reg_class = %s\n"
2565 "wK reg_class = %s\n"
2566 "\n",
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2577 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2578 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2579 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2580 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2581 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2582 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2583 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2584 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2585 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2586 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2587 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2588 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2589 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2590 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2591 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2592 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2593 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2594 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2595 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2596 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2597 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2598 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2600 nl = "\n";
2601 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2602 rs6000_debug_print_mode (m);
2604 fputs ("\n", stderr);
2606 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2608 machine_mode mode1 = print_tieable_modes[m1];
2609 bool first_time = true;
2611 nl = (const char *)0;
2612 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2614 machine_mode mode2 = print_tieable_modes[m2];
2615 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2617 if (first_time)
2619 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2620 nl = "\n";
2621 first_time = false;
2624 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2628 if (!first_time)
2629 fputs ("\n", stderr);
2632 if (nl)
2633 fputs (nl, stderr);
2635 if (rs6000_recip_control)
2637 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2639 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2640 if (rs6000_recip_bits[m])
2642 fprintf (stderr,
2643 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2644 GET_MODE_NAME (m),
2645 (RS6000_RECIP_AUTO_RE_P (m)
2646 ? "auto"
2647 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2648 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2649 ? "auto"
2650 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2653 fputs ("\n", stderr);
2656 if (rs6000_cpu_index >= 0)
2658 const char *name = processor_target_table[rs6000_cpu_index].name;
2659 HOST_WIDE_INT flags
2660 = processor_target_table[rs6000_cpu_index].target_enable;
2662 sprintf (flags_buffer, "-mcpu=%s flags", name);
2663 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2665 else
2666 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2668 if (rs6000_tune_index >= 0)
2670 const char *name = processor_target_table[rs6000_tune_index].name;
2671 HOST_WIDE_INT flags
2672 = processor_target_table[rs6000_tune_index].target_enable;
2674 sprintf (flags_buffer, "-mtune=%s flags", name);
2675 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2677 else
2678 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2680 cl_target_option_save (&cl_opts, &global_options);
2681 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2682 rs6000_isa_flags);
2684 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2685 rs6000_isa_flags_explicit);
2687 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2688 rs6000_builtin_mask);
2690 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2692 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2693 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2695 switch (rs6000_sched_costly_dep)
2697 case max_dep_latency:
2698 costly_str = "max_dep_latency";
2699 break;
2701 case no_dep_costly:
2702 costly_str = "no_dep_costly";
2703 break;
2705 case all_deps_costly:
2706 costly_str = "all_deps_costly";
2707 break;
2709 case true_store_to_load_dep_costly:
2710 costly_str = "true_store_to_load_dep_costly";
2711 break;
2713 case store_to_load_dep_costly:
2714 costly_str = "store_to_load_dep_costly";
2715 break;
2717 default:
2718 costly_str = costly_num;
2719 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2720 break;
2723 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2725 switch (rs6000_sched_insert_nops)
2727 case sched_finish_regroup_exact:
2728 nop_str = "sched_finish_regroup_exact";
2729 break;
2731 case sched_finish_pad_groups:
2732 nop_str = "sched_finish_pad_groups";
2733 break;
2735 case sched_finish_none:
2736 nop_str = "sched_finish_none";
2737 break;
2739 default:
2740 nop_str = nop_num;
2741 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2742 break;
2745 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2747 switch (rs6000_sdata)
2749 default:
2750 case SDATA_NONE:
2751 break;
2753 case SDATA_DATA:
2754 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2755 break;
2757 case SDATA_SYSV:
2758 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2759 break;
2761 case SDATA_EABI:
2762 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2763 break;
2767 switch (rs6000_traceback)
2769 case traceback_default: trace_str = "default"; break;
2770 case traceback_none: trace_str = "none"; break;
2771 case traceback_part: trace_str = "part"; break;
2772 case traceback_full: trace_str = "full"; break;
2773 default: trace_str = "unknown"; break;
2776 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2778 switch (rs6000_current_cmodel)
2780 case CMODEL_SMALL: cmodel_str = "small"; break;
2781 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2782 case CMODEL_LARGE: cmodel_str = "large"; break;
2783 default: cmodel_str = "unknown"; break;
2786 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2788 switch (rs6000_current_abi)
2790 case ABI_NONE: abi_str = "none"; break;
2791 case ABI_AIX: abi_str = "aix"; break;
2792 case ABI_ELFv2: abi_str = "ELFv2"; break;
2793 case ABI_V4: abi_str = "V4"; break;
2794 case ABI_DARWIN: abi_str = "darwin"; break;
2795 default: abi_str = "unknown"; break;
2798 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2800 if (rs6000_altivec_abi)
2801 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2803 if (rs6000_spe_abi)
2804 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2806 if (rs6000_darwin64_abi)
2807 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2809 if (rs6000_float_gprs)
2810 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2812 fprintf (stderr, DEBUG_FMT_S, "fprs",
2813 (TARGET_FPRS ? "true" : "false"));
2815 fprintf (stderr, DEBUG_FMT_S, "single_float",
2816 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2818 fprintf (stderr, DEBUG_FMT_S, "double_float",
2819 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2821 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2822 (TARGET_SOFT_FLOAT ? "true" : "false"));
2824 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2825 (TARGET_E500_SINGLE ? "true" : "false"));
2827 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2828 (TARGET_E500_DOUBLE ? "true" : "false"));
2830 if (TARGET_LINK_STACK)
2831 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2833 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2835 if (TARGET_P8_FUSION)
2837 char options[80];
2839 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2840 if (TARGET_TOC_FUSION)
2841 strcat (options, ", toc");
2843 if (TARGET_P8_FUSION_SIGN)
2844 strcat (options, ", sign");
2846 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2849 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2850 TARGET_SECURE_PLT ? "secure" : "bss");
2851 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2852 aix_struct_return ? "aix" : "sysv");
2853 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2854 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2855 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2856 tf[!!rs6000_align_branch_targets]);
2857 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2858 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2859 rs6000_long_double_type_size);
2860 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2861 (int)rs6000_sched_restricted_insns_priority);
2862 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2863 (int)END_BUILTINS);
2864 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2865 (int)RS6000_BUILTIN_COUNT);
2867 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2868 (int)TARGET_FLOAT128_ENABLE_TYPE);
2870 if (TARGET_VSX)
2871 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2872 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2874 if (TARGET_DIRECT_MOVE_128)
2875 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2876 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2880 /* Update the addr mask bits in reg_addr to help secondary reload and the
2881 legitimate address support figure out the appropriate addressing to
2882 use. */
2884 static void
2885 rs6000_setup_reg_addr_masks (void)
2887 ssize_t rc, reg, m, nregs;
2888 addr_mask_type any_addr_mask, addr_mask;
2890 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2892 machine_mode m2 = (machine_mode) m;
2893 bool complex_p = false;
2894 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2895 size_t msize;
2897 if (COMPLEX_MODE_P (m2))
2899 complex_p = true;
2900 m2 = GET_MODE_INNER (m2);
2903 msize = GET_MODE_SIZE (m2);
2905 /* SDmode is special in that we want to access it only via REG+REG
2906 addressing on power7 and above, since we want to use the LFIWZX and
2907 STFIWZX instructions to load it. */
2908 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2910 any_addr_mask = 0;
2911 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2913 addr_mask = 0;
2914 reg = reload_reg_map[rc].reg;
2916 /* Can mode values go in the GPR/FPR/Altivec registers? */
2917 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2919 bool small_int_vsx_p = (small_int_p
2920 && (rc == RELOAD_REG_FPR
2921 || rc == RELOAD_REG_VMX));
2923 nregs = rs6000_hard_regno_nregs[m][reg];
2924 addr_mask |= RELOAD_REG_VALID;
2926 /* Indicate if the mode takes more than 1 physical register. If
2927 it takes a single register, indicate it can do REG+REG
2928 addressing. Small integers in VSX registers can only do
2929 REG+REG addressing. */
2930 if (small_int_vsx_p)
2931 addr_mask |= RELOAD_REG_INDEXED;
2932 else if (nregs > 1 || m == BLKmode || complex_p)
2933 addr_mask |= RELOAD_REG_MULTIPLE;
2934 else
2935 addr_mask |= RELOAD_REG_INDEXED;
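/* E.g. (illustrative): DFmode in 32-bit GPRs spans two registers and so
   gets RELOAD_REG_MULTIPLE, while SImode in a VSX register takes the
   small_int_vsx_p path above and is limited to RELOAD_REG_INDEXED.  */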
2937 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2938 addressing. Restrict addressing on SPE for 64-bit types
2939 because of the SUBREG hackery used to address 64-bit floats in
2940 '32-bit' GPRs. If we allow scalars into Altivec registers,
2941 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2943 if (TARGET_UPDATE
2944 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2945 && msize <= 8
2946 && !VECTOR_MODE_P (m2)
2947 && !FLOAT128_VECTOR_P (m2)
2948 && !complex_p
2949 && !small_int_vsx_p
2950 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2951 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2952 && !(TARGET_E500_DOUBLE && msize == 8))
2954 addr_mask |= RELOAD_REG_PRE_INCDEC;
2956 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2957 we don't allow PRE_MODIFY for some multi-register
2958 operations. */
2959 switch (m)
2961 default:
2962 addr_mask |= RELOAD_REG_PRE_MODIFY;
2963 break;
2965 case DImode:
2966 if (TARGET_POWERPC64)
2967 addr_mask |= RELOAD_REG_PRE_MODIFY;
2968 break;
2970 case DFmode:
2971 case DDmode:
2972 if (TARGET_DF_INSN)
2973 addr_mask |= RELOAD_REG_PRE_MODIFY;
2974 break;
2979 /* GPR and FPR registers can do REG+OFFSET addressing, except
2980 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2981 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2982 if ((addr_mask != 0) && !indexed_only_p
2983 && msize <= 8
2984 && (rc == RELOAD_REG_GPR
2985 || ((msize == 8 || m2 == SFmode)
2986 && (rc == RELOAD_REG_FPR
2987 || (rc == RELOAD_REG_VMX
2988 && TARGET_P9_DFORM_SCALAR)))))
2989 addr_mask |= RELOAD_REG_OFFSET;
2991 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2992 instructions are enabled. The offset for 128-bit VSX registers is
2993 only 12 bits. While GPRs can handle the full offset range, VSX
2994 registers can only handle the restricted range. */
2995 else if ((addr_mask != 0) && !indexed_only_p
2996 && msize == 16 && TARGET_P9_DFORM_VECTOR
2997 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2998 || (m2 == TImode && TARGET_VSX_TIMODE)))
3000 addr_mask |= RELOAD_REG_OFFSET;
3001 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3002 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3005 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3006 addressing on 128-bit types. */
3007 if (rc == RELOAD_REG_VMX && msize == 16
3008 && (addr_mask & RELOAD_REG_VALID) != 0)
3009 addr_mask |= RELOAD_REG_AND_M16;
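/* The & -16 form models the AltiVec lvx/stvx behavior of ignoring the
   low four address bits (illustrative): an address of 0x1008 accesses
   the 16-byte-aligned block at 0x1000.  */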
3011 reg_addr[m].addr_mask[rc] = addr_mask;
3012 any_addr_mask |= addr_mask;
3015 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
3020 /* Initialize the various global tables that are based on register size. */
3021 static void
3022 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3024 ssize_t r, m, c;
3025 int align64;
3026 int align32;
3028 /* Precalculate REGNO_REG_CLASS. */
3029 rs6000_regno_regclass[0] = GENERAL_REGS;
3030 for (r = 1; r < 32; ++r)
3031 rs6000_regno_regclass[r] = BASE_REGS;
3033 for (r = 32; r < 64; ++r)
3034 rs6000_regno_regclass[r] = FLOAT_REGS;
3036 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3037 rs6000_regno_regclass[r] = NO_REGS;
3039 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3040 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3042 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3043 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3044 rs6000_regno_regclass[r] = CR_REGS;
3046 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3047 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3048 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3049 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3050 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3051 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
3052 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
3053 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3054 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3055 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3056 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3057 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3059 /* Precalculate register class to simpler reload register class. We don't
3060 need all of the register classes that are combinations of different
3061 classes, just the simple ones that have constraint letters. */
3062 for (c = 0; c < N_REG_CLASSES; c++)
3063 reg_class_to_reg_type[c] = NO_REG_TYPE;
3065 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3066 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3067 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3068 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3069 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3070 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3071 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3072 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3073 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3074 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3075 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
3076 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
3078 if (TARGET_VSX)
3080 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3081 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3083 else
3085 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3086 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3089 /* Precalculate the valid memory formats as well as the vector information;
3090 this must be set up before the rs6000_hard_regno_nregs_internal calls
3091 below. */
3092 gcc_assert ((int)VECTOR_NONE == 0);
3093 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3094 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3096 gcc_assert ((int)CODE_FOR_nothing == 0);
3097 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3099 gcc_assert ((int)NO_REGS == 0);
3100 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3102 /* The VSX hardware allows native alignment for vectors. Control whether the
3103 compiler believes it can use native alignment or must still use 128-bit alignment. */
3104 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3106 align64 = 64;
3107 align32 = 32;
3109 else
3111 align64 = 128;
3112 align32 = 128;
3115 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3116 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3117 if (TARGET_FLOAT128_TYPE)
3119 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3120 rs6000_vector_align[KFmode] = 128;
3122 if (FLOAT128_IEEE_P (TFmode))
3124 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3125 rs6000_vector_align[TFmode] = 128;
3129 /* V2DF mode, VSX only. */
3130 if (TARGET_VSX)
3132 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3133 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3134 rs6000_vector_align[V2DFmode] = align64;
3137 /* V4SF mode, either VSX or Altivec. */
3138 if (TARGET_VSX)
3140 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3141 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3142 rs6000_vector_align[V4SFmode] = align32;
3144 else if (TARGET_ALTIVEC)
3146 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3147 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3148 rs6000_vector_align[V4SFmode] = align32;
3151 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3152 and stores. */
3153 if (TARGET_ALTIVEC)
3155 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3156 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3157 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3158 rs6000_vector_align[V4SImode] = align32;
3159 rs6000_vector_align[V8HImode] = align32;
3160 rs6000_vector_align[V16QImode] = align32;
3162 if (TARGET_VSX)
3164 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3165 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3166 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3168 else
3170 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3171 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3172 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3176 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3177 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3178 if (TARGET_VSX)
3180 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3181 rs6000_vector_unit[V2DImode]
3182 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3183 rs6000_vector_align[V2DImode] = align64;
3185 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3186 rs6000_vector_unit[V1TImode]
3187 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3188 rs6000_vector_align[V1TImode] = 128;
3191 /* DFmode, see if we want to use the VSX unit. Memory is handled
3192 differently, so don't set rs6000_vector_mem. */
3193 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3195 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3196 rs6000_vector_align[DFmode] = 64;
3199 /* SFmode, see if we want to use the VSX unit. */
3200 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3202 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3203 rs6000_vector_align[SFmode] = 32;
3206 /* Allow TImode in VSX register and set the VSX memory macros. */
3207 if (TARGET_VSX && TARGET_VSX_TIMODE)
3209 rs6000_vector_mem[TImode] = VECTOR_VSX;
3210 rs6000_vector_align[TImode] = align64;
3213 /* TODO add SPE and paired floating point vector support. */
3215 /* Register class constraints for the constraints that depend on compile
3216 switches. When the VSX code was added, different constraints were added
3217 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3218 of the VSX registers are used. The register classes for scalar floating
3219 point types are set based on whether we allow that type into the upper
3220 (Altivec) registers. GCC has register classes to target the Altivec
3221 registers for load/store operations, to select using a VSX memory
3222 operation instead of the traditional floating point operation. The
3223 constraints are:
3225 d - Register class to use with traditional DFmode instructions.
3226 f - Register class to use with traditional SFmode instructions.
3227 v - Altivec register.
3228 wa - Any VSX register.
3229 wc - Reserved to represent individual CR bits (used in LLVM).
3230 wd - Preferred register class for V2DFmode.
3231 wf - Preferred register class for V4SFmode.
3232 wg - Float register for power6x move insns.
3233 wh - FP register for direct move instructions.
3234 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3235 wj - FP or VSX register to hold 64-bit integers for direct moves.
3236 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3237 wl - Float register if we can do 32-bit signed int loads.
3238 wm - VSX register for ISA 2.07 direct move operations.
3239 wn - always NO_REGS.
3240 wr - GPR if 64-bit mode is permitted.
3241 ws - Register class to do ISA 2.06 DF operations.
3242 wt - VSX register for TImode in VSX registers.
3243 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3244 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3245 ww - Register class to do SF conversions in with VSX operations.
3246 wx - Float register if we can do 32-bit int stores.
3247 wy - Register class to do ISA 2.07 SF operations.
3248 wz - Float register if we can do 32-bit unsigned int loads.
3249 wH - Altivec register if SImode is allowed in VSX registers.
3250 wI - VSX register if SImode is allowed in VSX registers.
3251 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3252 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
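/* Hypothetical user-level illustration (not part of this file): inline
   asm can name these constraints directly, e.g. a VSX double-precision
   vector add

       __asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (c) : "wa" (a), "wa" (b));

   where "wa" requests any VSX register and the %x modifier prints the
   full 0..63 VSX register number.  */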
3254 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3255 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3257 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3258 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3260 if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3263 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3264 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3266 if (TARGET_VSX_TIMODE)
3267 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3269 if (TARGET_UPPER_REGS_DF) /* DFmode */
3271 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3272 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3274 else
3275 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3277 if (TARGET_UPPER_REGS_DI) /* DImode */
3278 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3279 else
3280 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3283 /* Add conditional constraints based on various options, to allow us to
3284 collapse multiple insn patterns. */
3285 if (TARGET_ALTIVEC)
3286 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3288 if (TARGET_MFPGPR) /* DFmode */
3289 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3291 if (TARGET_LFIWAX)
3292 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3294 if (TARGET_DIRECT_MOVE)
3296 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3297 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3298 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3299 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3300 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3301 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3304 if (TARGET_POWERPC64)
3306 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3307 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3310 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3312 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3313 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3314 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3316 else if (TARGET_P8_VECTOR)
3318 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3319 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3321 else if (TARGET_VSX)
3322 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3324 if (TARGET_STFIWX)
3325 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3327 if (TARGET_LFIWZX)
3328 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3330 if (TARGET_FLOAT128_TYPE)
3332 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3333 if (FLOAT128_IEEE_P (TFmode))
3334 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3337 /* Support for new D-form instructions. */
3338 if (TARGET_P9_DFORM_SCALAR)
3339 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3341 /* Support for ISA 3.0 (power9) vectors. */
3342 if (TARGET_P9_VECTOR)
3343 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3345 /* Support for new direct moves (ISA 3.0 + 64bit). */
3346 if (TARGET_DIRECT_MOVE_128)
3347 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3349 /* Support small integers in VSX registers. */
3350 if (TARGET_VSX_SMALL_INTEGER)
3352 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3353 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3354 if (TARGET_P9_VECTOR)
3356 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3357 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3361 /* Set up the reload helper and direct move functions. */
3362 if (TARGET_VSX || TARGET_ALTIVEC)
3364 if (TARGET_64BIT)
3366 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3367 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3368 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3369 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3370 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3371 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3372 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3373 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3374 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3375 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3376 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3377 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3378 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3379 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3380 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3381 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3382 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3383 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3384 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3385 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3387 if (FLOAT128_VECTOR_P (KFmode))
3389 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3390 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3393 if (FLOAT128_VECTOR_P (TFmode))
3395 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3396 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3399 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3400 available. */
3401 if (TARGET_NO_SDMODE_STACK)
3403 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3404 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3407 if (TARGET_VSX_TIMODE)
3409 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3410 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3413 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3415 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3416 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3417 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3418 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3419 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3420 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3421 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3422 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3423 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3425 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3426 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3427 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3428 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3429 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3430 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3431 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3432 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3433 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3435 if (FLOAT128_VECTOR_P (KFmode))
3437 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3438 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3441 if (FLOAT128_VECTOR_P (TFmode))
3443 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3444 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3448 else
3450 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3451 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3452 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3453 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3454 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3455 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3456 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3457 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3458 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3459 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3460 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3461 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3462 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3463 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3464 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3465 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3466 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3467 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3468 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3469 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3471 if (FLOAT128_VECTOR_P (KFmode))
3473 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3474 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3477 if (FLOAT128_IEEE_P (TFmode))
3479 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3480 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3483 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3484 available. */
3485 if (TARGET_NO_SDMODE_STACK)
3487 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3488 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3491 if (TARGET_VSX_TIMODE)
3493 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3494 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3497 if (TARGET_DIRECT_MOVE)
3499 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3500 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3501 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3505 if (TARGET_UPPER_REGS_DF)
3506 reg_addr[DFmode].scalar_in_vmx_p = true;
3508 if (TARGET_UPPER_REGS_DI)
3509 reg_addr[DImode].scalar_in_vmx_p = true;
3511 if (TARGET_UPPER_REGS_SF)
3512 reg_addr[SFmode].scalar_in_vmx_p = true;
3514 if (TARGET_VSX_SMALL_INTEGER)
3516 reg_addr[SImode].scalar_in_vmx_p = true;
3517 if (TARGET_P9_VECTOR)
3519 reg_addr[HImode].scalar_in_vmx_p = true;
3520 reg_addr[QImode].scalar_in_vmx_p = true;
3525 /* Set up the fusion operations. */
3526 if (TARGET_P8_FUSION)
3528 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3529 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3530 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3531 if (TARGET_64BIT)
3532 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3535 if (TARGET_P9_FUSION)
3537 struct fuse_insns {
3538 enum machine_mode mode; /* mode of the fused type. */
3539 enum machine_mode pmode; /* pointer mode. */
3540 enum rs6000_reload_reg_type rtype; /* register type. */
3541 enum insn_code load; /* load insn. */
3542 enum insn_code store; /* store insn. */
3545 static const struct fuse_insns addis_insns[] = {
3546 { SFmode, DImode, RELOAD_REG_FPR,
3547 CODE_FOR_fusion_vsx_di_sf_load,
3548 CODE_FOR_fusion_vsx_di_sf_store },
3550 { SFmode, SImode, RELOAD_REG_FPR,
3551 CODE_FOR_fusion_vsx_si_sf_load,
3552 CODE_FOR_fusion_vsx_si_sf_store },
3554 { DFmode, DImode, RELOAD_REG_FPR,
3555 CODE_FOR_fusion_vsx_di_df_load,
3556 CODE_FOR_fusion_vsx_di_df_store },
3558 { DFmode, SImode, RELOAD_REG_FPR,
3559 CODE_FOR_fusion_vsx_si_df_load,
3560 CODE_FOR_fusion_vsx_si_df_store },
3562 { DImode, DImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_di_di_load,
3564 CODE_FOR_fusion_vsx_di_di_store },
3566 { DImode, SImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_si_di_load,
3568 CODE_FOR_fusion_vsx_si_di_store },
3570 { QImode, DImode, RELOAD_REG_GPR,
3571 CODE_FOR_fusion_gpr_di_qi_load,
3572 CODE_FOR_fusion_gpr_di_qi_store },
3574 { QImode, SImode, RELOAD_REG_GPR,
3575 CODE_FOR_fusion_gpr_si_qi_load,
3576 CODE_FOR_fusion_gpr_si_qi_store },
3578 { HImode, DImode, RELOAD_REG_GPR,
3579 CODE_FOR_fusion_gpr_di_hi_load,
3580 CODE_FOR_fusion_gpr_di_hi_store },
3582 { HImode, SImode, RELOAD_REG_GPR,
3583 CODE_FOR_fusion_gpr_si_hi_load,
3584 CODE_FOR_fusion_gpr_si_hi_store },
3586 { SImode, DImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_di_si_load,
3588 CODE_FOR_fusion_gpr_di_si_store },
3590 { SImode, SImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_si_si_load,
3592 CODE_FOR_fusion_gpr_si_si_store },
3594 { SFmode, DImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_di_sf_load,
3596 CODE_FOR_fusion_gpr_di_sf_store },
3598 { SFmode, SImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_si_sf_load,
3600 CODE_FOR_fusion_gpr_si_sf_store },
3602 { DImode, DImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_di_di_load,
3604 CODE_FOR_fusion_gpr_di_di_store },
3606 { DFmode, DImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_di_df_load,
3608 CODE_FOR_fusion_gpr_di_df_store },
3611 enum machine_mode cur_pmode = Pmode;
3612 size_t i;
3614 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3616 enum machine_mode xmode = addis_insns[i].mode;
3617 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3619 if (addis_insns[i].pmode != cur_pmode)
3620 continue;
3622 if (rtype == RELOAD_REG_FPR
3623 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3624 continue;
3626 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3627 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3629 if (rtype == RELOAD_REG_FPR && TARGET_P9_DFORM_SCALAR)
3631 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3632 = addis_insns[i].load;
3633 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3634 = addis_insns[i].store;
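/* Editorial note (illustration, not in the original source): the loop above
   keys each addis_insns row on the current pointer mode, so on a 64-bit
   target (Pmode == DImode) only the DImode-pmode rows are registered,
   e.g. { SFmode, DImode, RELOAD_REG_FPR, ... } but not its SImode twin. */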
3639 /* Note which types we support fusing TOC setup plus memory insn. We only do
3640 fused TOCs for medium/large code models. */
3641 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3642 && (TARGET_CMODEL != CMODEL_SMALL))
3644 reg_addr[QImode].fused_toc = true;
3645 reg_addr[HImode].fused_toc = true;
3646 reg_addr[SImode].fused_toc = true;
3647 reg_addr[DImode].fused_toc = true;
3648 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3650 if (TARGET_SINGLE_FLOAT)
3651 reg_addr[SFmode].fused_toc = true;
3652 if (TARGET_DOUBLE_FLOAT)
3653 reg_addr[DFmode].fused_toc = true;
3657 /* Precalculate HARD_REGNO_NREGS. */
3658 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3659 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3660 rs6000_hard_regno_nregs[m][r]
3661 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3663 /* Precalculate HARD_REGNO_MODE_OK. */
3664 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3665 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3666 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3667 rs6000_hard_regno_mode_ok_p[m][r] = true;
3669 /* Precalculate CLASS_MAX_NREGS sizes. */
3670 for (c = 0; c < LIM_REG_CLASSES; ++c)
3672 int reg_size;
3674 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3675 reg_size = UNITS_PER_VSX_WORD;
3677 else if (c == ALTIVEC_REGS)
3678 reg_size = UNITS_PER_ALTIVEC_WORD;
3680 else if (c == FLOAT_REGS)
3681 reg_size = UNITS_PER_FP_WORD;
3683 else
3684 reg_size = UNITS_PER_WORD;
3686 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3688 machine_mode m2 = (machine_mode)m;
3689 int reg_size2 = reg_size;
3691 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3692 in VSX. */
3693 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3694 reg_size2 = UNITS_PER_FP_WORD;
3696 rs6000_class_max_nregs[m][c]
3697 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
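/* Editorial sketch (not in the original source): the expression above is a
   ceiling division. For example, V4SImode (16 bytes) held in FLOAT_REGS
   (reg_size == UNITS_PER_FP_WORD == 8 bytes) needs

     (16 + 8 - 1) / 8 == 2 registers,

   while DFmode (8 bytes) in the same class needs (8 + 8 - 1) / 8 == 1.  */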
3701 if (TARGET_E500_DOUBLE)
3702 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3704 /* Calculate which modes to automatically generate code to use the
3705 reciprocal divide and square root instructions. In the future, possibly
3706 automatically generate the instructions even if the user did not specify
3707 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3708 not accurate enough. */
3709 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3710 if (TARGET_FRES)
3711 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3712 if (TARGET_FRE)
3713 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3714 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3715 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3716 if (VECTOR_UNIT_VSX_P (V2DFmode))
3717 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3719 if (TARGET_FRSQRTES)
3720 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3721 if (TARGET_FRSQRTE)
3722 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3723 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3724 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3725 if (VECTOR_UNIT_VSX_P (V2DFmode))
3726 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3728 if (rs6000_recip_control)
3730 if (!flag_finite_math_only)
3731 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3732 if (flag_trapping_math)
3733 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3734 if (!flag_reciprocal_math)
3735 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3736 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3738 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3739 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3740 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3742 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3743 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3744 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3746 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3747 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3748 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3750 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3751 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3752 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3754 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3755 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3756 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3758 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3759 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3760 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3762 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3763 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3764 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3766 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3767 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3768 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
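/* Editorial sketch (not in the original source): with the AUTO_RE bits set
   above under the -ffast-math style flags, a division x/y can later be
   expanded from the hardware estimate plus Newton-Raphson refinement,
   roughly:

     r0 = fre (y)                  (reciprocal estimate instruction)
     r1 = r0 * (2.0 - y * r0)      (one Newton-Raphson refinement step)
     x / y ~= x * r1

   and similarly for rsqrt, r1 = r0 * (1.5 - 0.5 * y * r0 * r0).  */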
3772 /* Update the addr mask bits in reg_addr to help secondary reload and
3773 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing
3774 to use. */
3775 rs6000_setup_reg_addr_masks ();
3777 if (global_init_p || TARGET_DEBUG_TARGET)
3779 if (TARGET_DEBUG_REG)
3780 rs6000_debug_reg_global ();
3782 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3783 fprintf (stderr,
3784 "SImode variable mult cost = %d\n"
3785 "SImode constant mult cost = %d\n"
3786 "SImode short constant mult cost = %d\n"
3787 "DImode multipliciation cost = %d\n"
3788 "SImode division cost = %d\n"
3789 "DImode division cost = %d\n"
3790 "Simple fp operation cost = %d\n"
3791 "DFmode multiplication cost = %d\n"
3792 "SFmode division cost = %d\n"
3793 "DFmode division cost = %d\n"
3794 "cache line size = %d\n"
3795 "l1 cache size = %d\n"
3796 "l2 cache size = %d\n"
3797 "simultaneous prefetches = %d\n"
3798 "\n",
3799 rs6000_cost->mulsi,
3800 rs6000_cost->mulsi_const,
3801 rs6000_cost->mulsi_const9,
3802 rs6000_cost->muldi,
3803 rs6000_cost->divsi,
3804 rs6000_cost->divdi,
3805 rs6000_cost->fp,
3806 rs6000_cost->dmul,
3807 rs6000_cost->sdiv,
3808 rs6000_cost->ddiv,
3809 rs6000_cost->cache_line_size,
3810 rs6000_cost->l1_cache_size,
3811 rs6000_cost->l2_cache_size,
3812 rs6000_cost->simultaneous_prefetches);
3816 #if TARGET_MACHO
3817 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3819 static void
3820 darwin_rs6000_override_options (void)
3822 /* The Darwin ABI always includes AltiVec, and it can't be (validly)
3823 turned off. */
3824 rs6000_altivec_abi = 1;
3825 TARGET_ALTIVEC_VRSAVE = 1;
3826 rs6000_current_abi = ABI_DARWIN;
3828 if (DEFAULT_ABI == ABI_DARWIN
3829 && TARGET_64BIT)
3830 darwin_one_byte_bool = 1;
3832 if (TARGET_64BIT && ! TARGET_POWERPC64)
3834 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3835 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3837 if (flag_mkernel)
3839 rs6000_default_long_calls = 1;
3840 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3843 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3844 Altivec. */
3845 if (!flag_mkernel && !flag_apple_kext
3846 && TARGET_64BIT
3847 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3848 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3850 /* Unless the user (not the configurer) has explicitly overridden
3851 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4
3852 unless targeting the kernel. */
3853 if (!flag_mkernel
3854 && !flag_apple_kext
3855 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3856 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3857 && ! global_options_set.x_rs6000_cpu_index)
3859 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3862 #endif
3864 /* If not otherwise specified by a target, make 'long double' equivalent to
3865 'double'. */
3867 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3868 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3869 #endif
3871 /* Return the builtin mask for the various options in use that could affect
3872 which builtins are available. In the past we used target_flags, but we've
3873 run out of bits, and some options like SPE and PAIRED are no longer in
3874 target_flags. */
3876 HOST_WIDE_INT
3877 rs6000_builtin_mask_calculate (void)
3879 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3880 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3881 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3882 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3883 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3884 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3885 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3886 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3887 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3888 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3889 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3890 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3891 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3892 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3893 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3894 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3895 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3896 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3897 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3898 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3899 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3900 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
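/* Editorial sketch (not part of the original source): a builtin whose
   required feature bits are recorded in a mask is usable only when all of
   those bits are present in the value computed above. A minimal,
   hypothetical check: */

static inline bool
example_builtin_supported_p (HOST_WIDE_INT fnmask)
{
  /* Every RS6000_BTM_* bit required by the builtin must be enabled.  */
  return (fnmask & rs6000_builtin_mask_calculate ()) == fnmask;
}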
3903 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3904 to clobber the XER[CA] bit because clobbering that bit without telling
3905 the compiler worked just fine with versions of GCC before GCC 5, and
3906 breaking a lot of older code in ways that are hard to track down is
3907 not such a great idea. */
3909 static rtx_insn *
3910 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3911 vec<const char *> &/*constraints*/,
3912 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3914 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3915 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3916 return NULL;
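/* Editorial sketch (not part of the original source): inline asm can depend
   on XER[CA] without naming it, which is why every asm is conservatively
   treated as clobbering CA above. A hypothetical example: */

static inline int
example_carry_using_asm (int x)
{
  int r;
  /* addic writes XER[CA] and addze reads it, yet neither the operands nor
     an explicit clobber list mention the carry bit.  */
  asm ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (r) : "r" (x));
  return r;
}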
3919 /* Override command line options.
3921 Combine build-specific configuration information with options
3922 specified on the command line to set various state variables which
3923 influence code generation, optimization, and expansion of built-in
3924 functions. Assure that command-line configuration preferences are
3925 compatible with each other and with the build configuration; issue
3926 warnings while adjusting configuration or error messages while
3927 rejecting configuration.
3929 Upon entry to this function:
3931 This function is called once at the beginning of
3932 compilation, and then again at the start and end of compiling
3933 each section of code that has a different configuration, as
3934 indicated, for example, by adding the
3936 __attribute__((__target__("cpu=power9")))
3938 qualifier to a function definition or, for example, by bracketing
3939 code between
3941 #pragma GCC target("altivec")
3945 #pragma GCC reset_options
3947 directives. Parameter global_init_p is true for the initial
3948 invocation, which initializes global variables, and false for all
3949 subsequent invocations.
3952 Various global state information is assumed to be valid. This
3953 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3954 default CPU specified at build configure time, TARGET_DEFAULT,
3955 representing the default set of option flags for the default
3956 target, and global_options_set.x_rs6000_isa_flags, representing
3957 which options were requested on the command line.
3959 Upon return from this function:
3961 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3962 was set by name on the command line. Additionally, if certain
3963 attributes are automatically enabled or disabled by this function
3964 in order to assure compatibility between options and
3965 configuration, the flags associated with those attributes are
3966 also set. By setting these "explicit bits", we avoid the risk
3967 that other code might accidentally overwrite these particular
3968 attributes with "default values".
3970 The various bits of rs6000_isa_flags are set to indicate the
3971 target options that have been selected for the most current
3972 compilation efforts. This has the effect of also turning on the
3973 associated TARGET_XXX values since these are macros which are
3974 generally defined to test the corresponding bit of the
3975 rs6000_isa_flags variable.
3977 The variable rs6000_builtin_mask is set to represent the target
3978 options for the most current compilation efforts, consistent with
3979 the current contents of rs6000_isa_flags. This variable controls
3980 expansion of built-in functions.
3982 Various other global variables and fields of global structures
3983 (over 50 in all) are initialized to reflect the desired options
3984 for the most current compilation efforts. */
3986 static bool
3987 rs6000_option_override_internal (bool global_init_p)
3989 bool ret = true;
3990 bool have_cpu = false;
3992 /* The default cpu requested at configure time, if any. */
3993 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3995 HOST_WIDE_INT set_masks;
3996 HOST_WIDE_INT ignore_masks;
3997 int cpu_index;
3998 int tune_index;
3999 struct cl_target_option *main_target_opt
4000 = ((global_init_p || target_option_default_node == NULL)
4001 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4003 /* Print defaults. */
4004 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4005 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4007 /* Remember the explicit arguments. */
4008 if (global_init_p)
4009 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4011 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4012 library functions, so warn about it. The flag may be useful for
4013 performance studies from time to time though, so don't disable it
4014 entirely. */
4015 if (global_options_set.x_rs6000_alignment_flags
4016 && rs6000_alignment_flags == MASK_ALIGN_POWER
4017 && DEFAULT_ABI == ABI_DARWIN
4018 && TARGET_64BIT)
4019 warning (0, "-malign-power is not supported for 64-bit Darwin;"
4020 " it is incompatible with the installed C and C++ libraries");
4022 /* Numerous experiments show that IRA-based loop pressure
4023 calculation works better for RTL loop invariant motion on targets
4024 with enough (>= 32) registers. It is an expensive optimization,
4025 so it is enabled only for peak performance. */
4026 if (optimize >= 3 && global_init_p
4027 && !global_options_set.x_flag_ira_loop_pressure)
4028 flag_ira_loop_pressure = 1;
4030 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4031 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4032 options were already specified. */
4033 if (flag_sanitize & SANITIZE_USER_ADDRESS
4034 && !global_options_set.x_flag_asynchronous_unwind_tables)
4035 flag_asynchronous_unwind_tables = 1;
4037 /* Set the pointer size. */
4038 if (TARGET_64BIT)
4040 rs6000_pmode = (int)DImode;
4041 rs6000_pointer_size = 64;
4043 else
4045 rs6000_pmode = (int)SImode;
4046 rs6000_pointer_size = 32;
4049 /* Some OSs don't support saving the high part of 64-bit registers on context
4050 switch. Other OSs don't support saving Altivec registers. On those OSs,
4051 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4052 if the user wants either, the user must explicitly specify them and we
4053 won't interfere with the user's specification. */
4055 set_masks = POWERPC_MASKS;
4056 #ifdef OS_MISSING_POWERPC64
4057 if (OS_MISSING_POWERPC64)
4058 set_masks &= ~OPTION_MASK_POWERPC64;
4059 #endif
4060 #ifdef OS_MISSING_ALTIVEC
4061 if (OS_MISSING_ALTIVEC)
4062 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4063 | OTHER_VSX_VECTOR_MASKS);
4064 #endif
4066 /* Don't let the processor default override masks that were given explicitly. */
4067 set_masks &= ~rs6000_isa_flags_explicit;
4069 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4070 the cpu in a target attribute or pragma, but did not specify a tuning
4071 option, use the cpu for the tuning option rather than the option specified
4072 with -mtune on the command line. Process a '--with-cpu' configuration
4073 request as an implicit -mcpu. */
4074 if (rs6000_cpu_index >= 0)
4076 cpu_index = rs6000_cpu_index;
4077 have_cpu = true;
4079 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4081 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4082 have_cpu = true;
4084 else if (implicit_cpu)
4086 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4087 have_cpu = true;
4089 else
4091 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4092 const char *default_cpu = ((!TARGET_POWERPC64)
4093 ? "powerpc"
4094 : ((BYTES_BIG_ENDIAN)
4095 ? "powerpc64"
4096 : "powerpc64le"));
4098 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4099 have_cpu = false;
4102 gcc_assert (cpu_index >= 0);
4104 if (have_cpu)
4106 #ifndef HAVE_AS_POWER9
4107 if (processor_target_table[rs6000_cpu_index].processor
4108 == PROCESSOR_POWER9)
4110 have_cpu = false;
4111 warning (0, "will not generate power9 instructions because "
4112 "assembler lacks power9 support");
4114 #endif
4115 #ifndef HAVE_AS_POWER8
4116 if (processor_target_table[rs6000_cpu_index].processor
4117 == PROCESSOR_POWER8)
4119 have_cpu = false;
4120 warning (0, "will not generate power8 instructions because "
4121 "assembler lacks power8 support");
4123 #endif
4124 #ifndef HAVE_AS_POPCNTD
4125 if (processor_target_table[rs6000_cpu_index].processor
4126 == PROCESSOR_POWER7)
4128 have_cpu = false;
4129 warning (0, "will not generate power7 instructions because "
4130 "assembler lacks power7 support");
4132 #endif
4133 #ifndef HAVE_AS_DFP
4134 if (processor_target_table[rs6000_cpu_index].processor
4135 == PROCESSOR_POWER6)
4137 have_cpu = false;
4138 warning (0, "will not generate power6 instructions because "
4139 "assembler lacks power6 support");
4141 #endif
4142 #ifndef HAVE_AS_POPCNTB
4143 if (processor_target_table[rs6000_cpu_index].processor
4144 == PROCESSOR_POWER5)
4146 have_cpu = false;
4147 warning (0, "will not generate power5 instructions because "
4148 "assembler lacks power5 support");
4150 #endif
4152 if (!have_cpu)
4154 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4155 const char *default_cpu = (!TARGET_POWERPC64
4156 ? "powerpc"
4157 : (BYTES_BIG_ENDIAN
4158 ? "powerpc64"
4159 : "powerpc64le"));
4161 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4165 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4166 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4167 with those from the cpu, except for options that were explicitly set. If
4168 we don't have a cpu, do not override the target bits set in
4169 TARGET_DEFAULT. */
4170 if (have_cpu)
4172 rs6000_isa_flags &= ~set_masks;
4173 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4174 & set_masks);
4176 else
4178 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4179 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4180 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4181 to using rs6000_isa_flags, we need to do the initialization here.
4183 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4184 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4185 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4186 : processor_target_table[cpu_index].target_enable);
4187 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
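/* Editorial note (illustration, not in the original source): the masking
   above and the earlier removal of explicit bits from set_masks preserve
   user choices. E.g. with "-mcpu=power8 -mno-vsx", OPTION_MASK_VSX is in
   rs6000_isa_flags_explicit, so the power8 defaults cannot re-enable it. */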
4190 if (rs6000_tune_index >= 0)
4191 tune_index = rs6000_tune_index;
4192 else if (have_cpu)
4193 rs6000_tune_index = tune_index = cpu_index;
4194 else
4196 size_t i;
4197 enum processor_type tune_proc
4198 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4200 tune_index = -1;
4201 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4202 if (processor_target_table[i].processor == tune_proc)
4204 rs6000_tune_index = tune_index = i;
4205 break;
4209 gcc_assert (tune_index >= 0);
4210 rs6000_cpu = processor_target_table[tune_index].processor;
4212 /* Pick defaults for SPE-related control flags. Do this early to make sure
4213 that the TARGET_ macros are representative ASAP. */
4215 int spe_capable_cpu =
4216 (rs6000_cpu == PROCESSOR_PPC8540
4217 || rs6000_cpu == PROCESSOR_PPC8548);
4219 if (!global_options_set.x_rs6000_spe_abi)
4220 rs6000_spe_abi = spe_capable_cpu;
4222 if (!global_options_set.x_rs6000_spe)
4223 rs6000_spe = spe_capable_cpu;
4225 if (!global_options_set.x_rs6000_float_gprs)
4226 rs6000_float_gprs =
4227 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
4228 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
4229 : 0);
4232 if (global_options_set.x_rs6000_spe_abi
4233 && rs6000_spe_abi
4234 && !TARGET_SPE_ABI)
4235 error ("not configured for SPE ABI");
4237 if (global_options_set.x_rs6000_spe
4238 && rs6000_spe
4239 && !TARGET_SPE)
4240 error ("not configured for SPE instruction set");
4242 if (main_target_opt != NULL
4243 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
4244 || (main_target_opt->x_rs6000_spe != rs6000_spe)
4245 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
4246 error ("target attribute or pragma changes SPE ABI");
4248 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4249 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4250 || rs6000_cpu == PROCESSOR_PPCE5500)
4252 if (TARGET_ALTIVEC)
4253 error ("AltiVec not supported in this target");
4254 if (TARGET_SPE)
4255 error ("SPE not supported in this target");
4257 if (rs6000_cpu == PROCESSOR_PPCE6500)
4259 if (TARGET_SPE)
4260 error ("SPE not supported in this target");
4263 /* If we are optimizing big endian systems for space, use the load/store
4264 multiple and string instructions. */
4265 if (BYTES_BIG_ENDIAN && optimize_size)
4266 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4267 | OPTION_MASK_STRING);
4269 /* Don't allow -mmultiple or -mstring on little endian systems
4270 unless the cpu is a 750, because the hardware doesn't support the
4271 instructions used in little endian mode, and they cause an
4272 alignment trap. The 750 does not cause an alignment trap (except
4273 when the target is unaligned). */
4275 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4277 if (TARGET_MULTIPLE)
4279 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4280 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4281 warning (0, "-mmultiple is not supported on little endian systems");
4284 if (TARGET_STRING)
4286 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4287 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4288 warning (0, "-mstring is not supported on little endian systems");
4292 /* If little-endian, default to -mstrict-align on older processors.
4293 Testing for htm matches power8 and later. */
4294 if (!BYTES_BIG_ENDIAN
4295 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4296 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4298 /* -maltivec={le,be} implies -maltivec. */
4299 if (rs6000_altivec_element_order != 0)
4300 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4302 /* Disallow -maltivec=le in big endian mode for now. This is not
4303 known to be useful for anyone. */
4304 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4306 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4307 rs6000_altivec_element_order = 0;
4310 /* Add some warnings for VSX. */
4311 if (TARGET_VSX)
4313 const char *msg = NULL;
4314 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
4315 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4317 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4318 msg = N_("-mvsx requires hardware floating point");
4319 else
4321 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4322 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4325 else if (TARGET_PAIRED_FLOAT)
4326 msg = N_("-mvsx and -mpaired are incompatible");
4327 else if (TARGET_AVOID_XFORM > 0)
4328 msg = N_("-mvsx needs indexed addressing");
4329 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4330 & OPTION_MASK_ALTIVEC))
4332 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4333 msg = N_("-mvsx and -mno-altivec are incompatible");
4334 else
4335 msg = N_("-mno-altivec disables vsx");
4338 if (msg)
4340 warning (0, msg);
4341 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4342 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4346 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4347 the -mcpu setting to enable options that conflict. */
4348 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4349 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4350 | OPTION_MASK_ALTIVEC
4351 | OPTION_MASK_VSX)) != 0)
4352 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4353 | OPTION_MASK_DIRECT_MOVE)
4354 & ~rs6000_isa_flags_explicit);
4356 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4357 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4359 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4360 off all of the options that depend on those flags. */
4361 ignore_masks = rs6000_disable_incompatible_switches ();
4363 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4364 unless the user explicitly used -mno-<option> to disable the code. */
4365 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4366 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0)
4367 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4368 else if (TARGET_P9_MINMAX)
4370 if (have_cpu)
4372 if (cpu_index == PROCESSOR_POWER9)
4374 /* Legacy behavior: allow -mcpu=power9 with certain
4375 capabilities explicitly disabled. */
4376 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4377 /* However, reject this automatic fix if certain
4378 capabilities required for TARGET_P9_MINMAX support
4379 have been explicitly disabled. */
4380 if (((OPTION_MASK_VSX | OPTION_MASK_UPPER_REGS_SF
4381 | OPTION_MASK_UPPER_REGS_DF) & rs6000_isa_flags)
4382 != (OPTION_MASK_VSX | OPTION_MASK_UPPER_REGS_SF
4383 | OPTION_MASK_UPPER_REGS_DF))
4384 error ("-mpower9-minmax incompatible with explicitly disabled options");
4386 else
4387 error ("Power9 target option is incompatible with -mcpu=<xxx> for "
4388 "<xxx> less than power9");
4390 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4391 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4392 & rs6000_isa_flags_explicit))
4393 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4394 were explicitly cleared. */
4395 error ("-mpower9-minmax incompatible with explicitly disabled options");
4396 else
4397 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4399 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4400 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4401 else if (TARGET_VSX)
4402 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4403 else if (TARGET_POPCNTD)
4404 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4405 else if (TARGET_DFP)
4406 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4407 else if (TARGET_CMPB)
4408 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4409 else if (TARGET_FPRND)
4410 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4411 else if (TARGET_POPCNTB)
4412 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4413 else if (TARGET_ALTIVEC)
4414 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4416 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4418 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4419 error ("-mcrypto requires -maltivec");
4420 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4423 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4425 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4426 error ("-mdirect-move requires -mvsx");
4427 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4430 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4432 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4433 error ("-mpower8-vector requires -maltivec");
4434 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4437 if (TARGET_P8_VECTOR && !TARGET_VSX)
4439 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4440 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4441 error ("-mpower8-vector requires -mvsx");
4442 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4444 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4445 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4446 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4448 else
4450 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4451 not explicit. */
4452 rs6000_isa_flags |= OPTION_MASK_VSX;
4453 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4457 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4459 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4460 error ("-mvsx-timode requires -mvsx");
4461 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4464 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4466 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4467 error ("-mhard-dfp requires -mhard-float");
4468 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4471 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4472 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4473 set the individual option. */
4474 if (TARGET_UPPER_REGS > 0)
4476 if (TARGET_VSX
4477 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4479 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4480 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4482 if (TARGET_VSX
4483 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4485 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4486 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4488 if (TARGET_P8_VECTOR
4489 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4491 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4492 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4495 else if (TARGET_UPPER_REGS == 0)
4497 if (TARGET_VSX
4498 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4500 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4501 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4503 if (TARGET_VSX
4504 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4506 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4507 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4509 if (TARGET_P8_VECTOR
4510 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4512 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4513 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4517 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4519 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4520 error ("-mupper-regs-df requires -mvsx");
4521 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4524 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4526 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4527 error ("-mupper-regs-di requires -mvsx");
4528 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4531 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4533 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4534 error ("-mupper-regs-sf requires -mpower8-vector");
4535 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4538 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4539 silently turn off quad memory mode. */
4540 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4542 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4543 warning (0, N_("-mquad-memory requires 64-bit mode"));
4545 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4546 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4548 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4549 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4552 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4553 the words are reversed, but atomic operations can still be done by
4554 swapping the words. */
4555 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4557 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4558 warning (0, N_("-mquad-memory is not available in little endian mode"));
4560 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4563 /* Assume that if the user asked for normal quad memory instructions, they
4564 want the atomic versions as well, unless they explicitly told us not to
4565 use quad word atomic instructions. */
4566 if (TARGET_QUAD_MEMORY
4567 && !TARGET_QUAD_MEMORY_ATOMIC
4568 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4569 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
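/* Editorial note (illustration, not in the original source): e.g. plain
   "-mquad-memory" on a 64-bit big-endian target also enables the atomic
   forms here, unless "-mno-quad-memory-atomic" was given explicitly.  */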
4571 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4572 generating power8 instructions. */
4573 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4574 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4575 & OPTION_MASK_P8_FUSION);
4577 /* Setting additional fusion flags turns on base fusion. */
4578 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4580 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4582 if (TARGET_P8_FUSION_SIGN)
4583 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4585 if (TARGET_TOC_FUSION)
4586 error ("-mtoc-fusion requires -mpower8-fusion");
4588 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4590 else
4591 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4594 /* Power9 fusion is a superset of power8 fusion. */
4595 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4597 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4599 /* We prefer to not mention undocumented options in
4600 error messages. However, if users have managed to select
4601 power9-fusion without selecting power8-fusion, they
4602 already know about undocumented flags. */
4603 error ("-mpower9-fusion requires -mpower8-fusion");
4604 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4606 else
4607 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4610 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4611 generating power9 instructions. */
4612 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4613 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4614 & OPTION_MASK_P9_FUSION);
4616 /* Power8 does not fuse sign extended loads with the addis. If we are
4617 optimizing at high levels for speed, convert a sign extended load into a
4618 zero extending load, and an explicit sign extension. */
4619 if (TARGET_P8_FUSION
4620 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4621 && optimize_function_for_speed_p (cfun)
4622 && optimize >= 3)
4623 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
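/* Editorial sketch (not in the original source): e.g. instead of a
   sign-extending halfword load that will not fuse on power8,

     addis rX,r2,hi ; lha rY,lo(rX)

   emit a fusable zero-extending load plus an explicit sign extension:

     addis rX,r2,hi ; lhz rY,lo(rX) ; extsh rY,rY  */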
4625 /* TOC fusion requires 64-bit and medium/large code model. */
4626 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4628 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4629 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4630 warning (0, N_("-mtoc-fusion requires 64-bit"));
4633 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4635 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4636 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4637 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4640 /* Turn on -mtoc-fusion by default if we have p8-fusion and a 64-bit
4641 medium/large code model. */
4642 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4643 && (TARGET_CMODEL != CMODEL_SMALL)
4644 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4645 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4647 /* ISA 3.0 vector instructions include ISA 2.07. */
4648 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4650 /* We prefer to not mention undocumented options in
4651 error messages. However, if users have managed to select
4652 power9-vector without selecting power8-vector, they
4653 already know about undocumented flags. */
4654 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4655 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4656 error ("-mpower9-vector requires -mpower8-vector");
4657 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4659 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4660 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4661 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4663 else
4665 /* OPTION_MASK_P9_VECTOR is explicit and
4666 OPTION_MASK_P8_VECTOR is not explicit. */
4667 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4668 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4672 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4673 -mpower9-dform-vector. */
4674 if (TARGET_P9_DFORM_BOTH > 0)
4676 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4677 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4679 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4680 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4682 else if (TARGET_P9_DFORM_BOTH == 0)
4684 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4685 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4687 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4688 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4691 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4692 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4694 /* We prefer to not mention undocumented options in
4695 error messages. However, if users have managed to select
4696 power9-dform without selecting power9-vector, they
4697 already know about undocumented flags. */
4698 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4699 && (rs6000_isa_flags_explicit & (OPTION_MASK_P9_DFORM_SCALAR
4700 | OPTION_MASK_P9_DFORM_VECTOR)))
4701 error ("-mpower9-dform requires -mpower9-vector");
4702 else if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4704 rs6000_isa_flags &=
4705 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4706 rs6000_isa_flags_explicit |=
4707 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4709 else
4711 /* We know that OPTION_MASK_P9_VECTOR is not explicit and
4712 OPTION_MASK_P9_DFORM_SCALAR or OPTION_MASK_P9_DFORM_VECTOR
4713 may be explicit. */
4714 rs6000_isa_flags |= OPTION_MASK_P9_VECTOR;
4715 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4719 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR)
4720 && !TARGET_DIRECT_MOVE)
4722 /* We prefer to not mention undocumented options in
4723 error messages. However, if users have managed to select
4724 power9-dform without selecting direct-move, they
4725 already know about undocumented flags. */
4726 if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4727 && ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) ||
4728 (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR) ||
4729 (TARGET_P9_DFORM_BOTH == 1)))
4730 error ("-mpower9-dform, -mpower9-dform-vector, -mpower9-dform-scalar"
4731 " require -mdirect-move");
4732 else if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE) == 0)
4734 rs6000_isa_flags |= OPTION_MASK_DIRECT_MOVE;
4735 rs6000_isa_flags_explicit |= OPTION_MASK_DIRECT_MOVE;
4737 else
4739 rs6000_isa_flags &=
4740 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4741 rs6000_isa_flags_explicit |=
4742 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4746 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4748 /* We prefer to not mention undocumented options in
4749 error messages. However, if users have managed to select
4750 power9-dform without selecting upper-regs-df, they
4751 already know about undocumented flags. */
4752 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4753 error ("-mpower9-dform requires -mupper-regs-df");
4754 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4757 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4759 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4760 error ("-mpower9-dform requires -mupper-regs-sf");
4761 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4764 /* Enable LRA by default. */
4765 if ((rs6000_isa_flags_explicit & OPTION_MASK_LRA) == 0)
4766 rs6000_isa_flags |= OPTION_MASK_LRA;
4768 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4769 but do show up with -mno-lra. Given that -mlra will become the default
4770 once PR 69847 is fixed, turn off the options with problems by default if
4771 -mno-lra was used, and warn if the user explicitly asked for the option.
4773 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4774 Enable -mvsx-timode by default if LRA and VSX. */
4775 if (!TARGET_LRA)
4777 if (TARGET_VSX_TIMODE)
4779 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4780 warning (0, "-mvsx-timode might need -mlra");
4782 else
4783 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4787 else
4789 if (TARGET_VSX && !TARGET_VSX_TIMODE
4790 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4791 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4794 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4795 support. If we only have ISA 2.06 support, and the user did not specify
4796 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4797 but we don't enable the full vectorization support. */
4798 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4799 TARGET_ALLOW_MOVMISALIGN = 1;
4801 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4803 if (TARGET_ALLOW_MOVMISALIGN > 0
4804 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4805 error ("-mallow-movmisalign requires -mvsx");
4807 TARGET_ALLOW_MOVMISALIGN = 0;
4810 /* Determine when unaligned vector accesses are permitted, and when
4811 they are preferred over masked Altivec loads. Note that if
4812 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4813 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4814 not true. */
4815 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4817 if (!TARGET_VSX)
4819 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4820 error ("-mefficient-unaligned-vsx requires -mvsx");
4822 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4825 else if (!TARGET_ALLOW_MOVMISALIGN)
4827 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4828 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4830 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4834 /* Check whether we should allow small integers into VSX registers. We
4835 require direct move so the register allocator does not have to move
4836 variables through memory. SImode can be used on ISA 2.07,
4837 while HImode and QImode require ISA 3.0. */
4838 if (TARGET_VSX_SMALL_INTEGER
4839 && (!TARGET_DIRECT_MOVE || !TARGET_P8_VECTOR || !TARGET_UPPER_REGS_DI))
4841 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_SMALL_INTEGER)
4842 error ("-mvsx-small-integer requires -mpower8-vector, "
4843 "-mupper-regs-di, and -mdirect-move");
4845 rs6000_isa_flags &= ~OPTION_MASK_VSX_SMALL_INTEGER;
4848 /* Set long double size before the IEEE 128-bit tests. */
4849 if (!global_options_set.x_rs6000_long_double_type_size)
4851 if (main_target_opt != NULL
4852 && (main_target_opt->x_rs6000_long_double_type_size
4853 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4854 error ("target attribute or pragma changes long double size");
4855 else
4856 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4859 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4860 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4861 pick up this default. */
4862 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4863 if (!global_options_set.x_rs6000_ieeequad)
4864 rs6000_ieeequad = 1;
4865 #endif
4867 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4868 systems, but don't enable the __float128 keyword. */
4869 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4870 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4871 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4872 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4874 /* IEEE 128-bit floating point requires VSX support. */
4875 if (!TARGET_VSX)
4877 if (TARGET_FLOAT128_KEYWORD)
4879 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4880 error ("-mfloat128 requires VSX support");
4882 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4883 | OPTION_MASK_FLOAT128_KEYWORD
4884 | OPTION_MASK_FLOAT128_HW);
4887 else if (TARGET_FLOAT128_TYPE)
4889 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4890 error ("-mfloat128-type requires VSX support");
4892 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4893 | OPTION_MASK_FLOAT128_KEYWORD
4894 | OPTION_MASK_FLOAT128_HW);
4898 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4899 128-bit floating point support to be enabled. */
4900 if (!TARGET_FLOAT128_TYPE)
4902 if (TARGET_FLOAT128_KEYWORD)
4904 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4906 error ("-mfloat128 requires -mfloat128-type");
4907 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4908 | OPTION_MASK_FLOAT128_KEYWORD
4909 | OPTION_MASK_FLOAT128_HW);
4911 else
4912 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4915 if (TARGET_FLOAT128_HW)
4917 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4919 error ("-mfloat128-hardware requires -mfloat128-type");
4920 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4922 else
4923 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4924 | OPTION_MASK_FLOAT128_KEYWORD
4925 | OPTION_MASK_FLOAT128_HW);
4929 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4930 -mfloat128-hardware by default. However, don't enable the __float128
4931 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4932 -mfloat128 option as well if it was not already set. */
4933 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4934 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4935 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4936 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4938 if (TARGET_FLOAT128_HW
4939 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4941 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4942 error ("-mfloat128-hardware requires full ISA 3.0 support");
4944 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4947 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4949 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4950 error ("-mfloat128-hardware requires -m64");
4952 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4955 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4956 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4957 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4958 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4960 /* Print the options after updating the defaults. */
4961 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4962 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4964 /* E500mc does "better" if we inline more aggressively. Respect the
4965 user's opinion, though. */
4966 if (rs6000_block_move_inline_limit == 0
4967 && (rs6000_cpu == PROCESSOR_PPCE500MC
4968 || rs6000_cpu == PROCESSOR_PPCE500MC64
4969 || rs6000_cpu == PROCESSOR_PPCE5500
4970 || rs6000_cpu == PROCESSOR_PPCE6500))
4971 rs6000_block_move_inline_limit = 128;
4973 /* store_one_arg depends on expand_block_move to handle at least the
4974 size of reg_parm_stack_space. */
4975 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4976 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4978 if (global_init_p)
4980 /* If the appropriate debug option is enabled, replace the target hooks
4981 with debug versions that call the real version and then print
4982 debugging information. */
4983 if (TARGET_DEBUG_COST)
4985 targetm.rtx_costs = rs6000_debug_rtx_costs;
4986 targetm.address_cost = rs6000_debug_address_cost;
4987 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4990 if (TARGET_DEBUG_ADDR)
4992 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4993 targetm.legitimize_address = rs6000_debug_legitimize_address;
4994 rs6000_secondary_reload_class_ptr
4995 = rs6000_debug_secondary_reload_class;
4996 rs6000_secondary_memory_needed_ptr
4997 = rs6000_debug_secondary_memory_needed;
4998 rs6000_cannot_change_mode_class_ptr
4999 = rs6000_debug_cannot_change_mode_class;
5000 rs6000_preferred_reload_class_ptr
5001 = rs6000_debug_preferred_reload_class;
5002 rs6000_legitimize_reload_address_ptr
5003 = rs6000_debug_legitimize_reload_address;
5004 rs6000_mode_dependent_address_ptr
5005 = rs6000_debug_mode_dependent_address;
5008 if (rs6000_veclibabi_name)
5010 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
5011 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
5012 else
5014 error ("unknown vectorization library ABI type (%s) for "
5015 "-mveclibabi= switch", rs6000_veclibabi_name);
5016 ret = false;
5021 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
5022 target attribute or pragma which automatically enables both options,
5023 unless the altivec ABI was set. This is set by default for 64-bit, but
5024 not for 32-bit. */
5025 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
5026 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
5027 | OPTION_MASK_FLOAT128_TYPE
5028 | OPTION_MASK_FLOAT128_KEYWORD)
5029 & ~rs6000_isa_flags_explicit);
5031 /* Enable Altivec ABI for AIX -maltivec. */
5032 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
5034 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
5035 error ("target attribute or pragma changes AltiVec ABI");
5036 else
5037 rs6000_altivec_abi = 1;
5040 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
5041 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
5042 be explicitly overridden in either case. */
5043 if (TARGET_ELF)
5045 if (!global_options_set.x_rs6000_altivec_abi
5046 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
5048 if (main_target_opt != NULL &&
5049 !main_target_opt->x_rs6000_altivec_abi)
5050 error ("target attribute or pragma changes AltiVec ABI");
5051 else
5052 rs6000_altivec_abi = 1;
5056 /* Set the Darwin64 ABI as default for 64-bit Darwin.
5057 So far, the only darwin64 targets are also MACH-O. */
5058 if (TARGET_MACHO
5059 && DEFAULT_ABI == ABI_DARWIN
5060 && TARGET_64BIT)
5062 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
5063 error ("target attribute or pragma changes darwin64 ABI");
5064 else
5066 rs6000_darwin64_abi = 1;
5067 /* Default to natural alignment, for better performance. */
5068 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
5072 /* Place FP constants in the constant pool instead of TOC
5073 if section anchors enabled. */
5074 if (flag_section_anchors
5075 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
5076 TARGET_NO_FP_IN_TOC = 1;
5078 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
5079 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
5081 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5082 SUBTARGET_OVERRIDE_OPTIONS;
5083 #endif
5084 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
5085 SUBSUBTARGET_OVERRIDE_OPTIONS;
5086 #endif
5087 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
5088 SUB3TARGET_OVERRIDE_OPTIONS;
5089 #endif
5091 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
5092 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
5094 /* For the E500 family of cores, reset the single/double FP flags to let us
5095 check that they remain constant across attributes or pragmas. Also,
5096 clear a possible request for string instructions, which are not supported
5097 there and which we might have silently enabled above for -Os.
5099 For other families, clear ISEL in case it was set implicitly.
5102 switch (rs6000_cpu)
5104 case PROCESSOR_PPC8540:
5105 case PROCESSOR_PPC8548:
5106 case PROCESSOR_PPCE500MC:
5107 case PROCESSOR_PPCE500MC64:
5108 case PROCESSOR_PPCE5500:
5109 case PROCESSOR_PPCE6500:
5111 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
5112 rs6000_double_float = TARGET_E500_DOUBLE;
5114 rs6000_isa_flags &= ~OPTION_MASK_STRING;
5116 break;
5118 default:
5120 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
5121 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
5123 break;
5126 if (main_target_opt)
5128 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
5129 error ("target attribute or pragma changes single precision floating "
5130 "point");
5131 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
5132 error ("target attribute or pragma changes double precision floating "
5133 "point");
5136 /* Detect invalid option combinations with E500. */
5137 CHECK_E500_OPTIONS;
5139 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
5140 && rs6000_cpu != PROCESSOR_POWER5
5141 && rs6000_cpu != PROCESSOR_POWER6
5142 && rs6000_cpu != PROCESSOR_POWER7
5143 && rs6000_cpu != PROCESSOR_POWER8
5144 && rs6000_cpu != PROCESSOR_POWER9
5145 && rs6000_cpu != PROCESSOR_PPCA2
5146 && rs6000_cpu != PROCESSOR_CELL
5147 && rs6000_cpu != PROCESSOR_PPC476);
5148 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
5149 || rs6000_cpu == PROCESSOR_POWER5
5150 || rs6000_cpu == PROCESSOR_POWER7
5151 || rs6000_cpu == PROCESSOR_POWER8);
5152 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
5153 || rs6000_cpu == PROCESSOR_POWER5
5154 || rs6000_cpu == PROCESSOR_POWER6
5155 || rs6000_cpu == PROCESSOR_POWER7
5156 || rs6000_cpu == PROCESSOR_POWER8
5157 || rs6000_cpu == PROCESSOR_POWER9
5158 || rs6000_cpu == PROCESSOR_PPCE500MC
5159 || rs6000_cpu == PROCESSOR_PPCE500MC64
5160 || rs6000_cpu == PROCESSOR_PPCE5500
5161 || rs6000_cpu == PROCESSOR_PPCE6500);
5163 /* Allow debug switches to override the above settings. These are set to -1
5164 in rs6000.opt to indicate the user hasn't directly set the switch. */
5165 if (TARGET_ALWAYS_HINT >= 0)
5166 rs6000_always_hint = TARGET_ALWAYS_HINT;
5168 if (TARGET_SCHED_GROUPS >= 0)
5169 rs6000_sched_groups = TARGET_SCHED_GROUPS;
5171 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
5172 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
5174 rs6000_sched_restricted_insns_priority
5175 = (rs6000_sched_groups ? 1 : 0);
5177 /* Handle -msched-costly-dep option. */
5178 rs6000_sched_costly_dep
5179 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
5181 if (rs6000_sched_costly_dep_str)
5183 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
5184 rs6000_sched_costly_dep = no_dep_costly;
5185 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
5186 rs6000_sched_costly_dep = all_deps_costly;
5187 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
5188 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
5189 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
5190 rs6000_sched_costly_dep = store_to_load_dep_costly;
5191 else
5192 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
5193 atoi (rs6000_sched_costly_dep_str));
5196 /* Handle -minsert-sched-nops option. */
5197 rs6000_sched_insert_nops
5198 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
5200 if (rs6000_sched_insert_nops_str)
5202 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
5203 rs6000_sched_insert_nops = sched_finish_none;
5204 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
5205 rs6000_sched_insert_nops = sched_finish_pad_groups;
5206 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
5207 rs6000_sched_insert_nops = sched_finish_regroup_exact;
5208 else
5209 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
5210 atoi (rs6000_sched_insert_nops_str));
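/* Illustrative examples: both scheduling strings above accept either a
   keyword or a bare number, e.g.

       -msched-costly-dep=store_to_load    -minsert-sched-nops=pad
       -msched-costly-dep=3                -minsert-sched-nops=2

   The numeric forms are converted with atoi and interpreted as the
   corresponding enumeration values.  */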
5213 /* Handle the stack protector options. */
5214 if (!global_options_set.x_rs6000_stack_protector_guard)
5215 #ifdef TARGET_THREAD_SSP_OFFSET
5216 rs6000_stack_protector_guard = SSP_TLS;
5217 #else
5218 rs6000_stack_protector_guard = SSP_GLOBAL;
5219 #endif
5221 #ifdef TARGET_THREAD_SSP_OFFSET
5222 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
5223 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
5224 #endif
5226 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
5228 char *endp;
5229 const char *str = rs6000_stack_protector_guard_offset_str;
5231 errno = 0;
5232 long offset = strtol (str, &endp, 0);
5233 if (!*str || *endp || errno)
5234 error ("%qs is not a valid number "
5235 "in -mstack-protector-guard-offset=", str);
5237 if (!IN_RANGE (offset, -0x8000, 0x7fff)
5238 || (TARGET_64BIT && (offset & 3)))
5239 error ("%qs is not a valid offset "
5240 "in -mstack-protector-guard-offset=", str);
5242 rs6000_stack_protector_guard_offset = offset;
5245 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
5247 const char *str = rs6000_stack_protector_guard_reg_str;
5248 int reg = decode_reg_name (str);
5250 if (!IN_RANGE (reg, 1, 31))
5251 error ("%qs is not a valid base register "
5252 "in -mstack-protector-guard-reg=", str);
5254 rs6000_stack_protector_guard_reg = reg;
5257 if (rs6000_stack_protector_guard == SSP_TLS
5258 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
5259 error ("-mstack-protector-guard=tls needs a valid base register");
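/* Illustrative example (the offset value is hypothetical): a TLS-based
   guard can be spelled out explicitly as

       -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
       -mstack-protector-guard-offset=0x7010

   subject to the checks above: the offset must fit in a signed 16-bit
   value (and be a multiple of 4 for -m64), and the base register must
   decode to r1..r31.  */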
5261 if (global_init_p)
5263 #ifdef TARGET_REGNAMES
5264 /* If the user desires alternate register names, copy in the
5265 alternate names now. */
5266 if (TARGET_REGNAMES)
5267 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
5268 #endif
5270 /* Set aix_struct_return last, after the ABI is determined.
5271 If -maix-struct-return or -msvr4-struct-return was explicitly
5272 used, don't override with the ABI default. */
5273 if (!global_options_set.x_aix_struct_return)
5274 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
5276 #if 0
5277 /* IBM XL compiler defaults to unsigned bitfields. */
5278 if (TARGET_XL_COMPAT)
5279 flag_signed_bitfields = 0;
5280 #endif
5282 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
5283 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
5285 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5287 /* We can only guarantee the availability of DI pseudo-ops when
5288 assembling for 64-bit targets. */
5289 if (!TARGET_64BIT)
5291 targetm.asm_out.aligned_op.di = NULL;
5292 targetm.asm_out.unaligned_op.di = NULL;
5296 /* Set branch target alignment, if not optimizing for size. */
5297 if (!optimize_size)
5299 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
5300 8-byte aligned to avoid misprediction by the branch predictor. */
5301 if (rs6000_cpu == PROCESSOR_TITAN
5302 || rs6000_cpu == PROCESSOR_CELL)
5304 if (align_functions <= 0)
5305 align_functions = 8;
5306 if (align_jumps <= 0)
5307 align_jumps = 8;
5308 if (align_loops <= 0)
5309 align_loops = 8;
5311 if (rs6000_align_branch_targets)
5313 if (align_functions <= 0)
5314 align_functions = 16;
5315 if (align_jumps <= 0)
5316 align_jumps = 16;
5317 if (align_loops <= 0)
5319 can_override_loop_align = 1;
5320 align_loops = 16;
5323 if (align_jumps_max_skip <= 0)
5324 align_jumps_max_skip = 15;
5325 if (align_loops_max_skip <= 0)
5326 align_loops_max_skip = 15;
5329 /* Arrange to save and restore machine status around nested functions. */
5330 init_machine_status = rs6000_init_machine_status;
5332 /* We should always be splitting complex arguments, but we can't break
5333 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5334 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5335 targetm.calls.split_complex_arg = NULL;
5337 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5338 if (DEFAULT_ABI == ABI_AIX)
5339 targetm.calls.custom_function_descriptors = 0;
5342 /* Initialize rs6000_cost with the appropriate target costs. */
5343 if (optimize_size)
5344 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5345 else
5346 switch (rs6000_cpu)
5348 case PROCESSOR_RS64A:
5349 rs6000_cost = &rs64a_cost;
5350 break;
5352 case PROCESSOR_MPCCORE:
5353 rs6000_cost = &mpccore_cost;
5354 break;
5356 case PROCESSOR_PPC403:
5357 rs6000_cost = &ppc403_cost;
5358 break;
5360 case PROCESSOR_PPC405:
5361 rs6000_cost = &ppc405_cost;
5362 break;
5364 case PROCESSOR_PPC440:
5365 rs6000_cost = &ppc440_cost;
5366 break;
5368 case PROCESSOR_PPC476:
5369 rs6000_cost = &ppc476_cost;
5370 break;
5372 case PROCESSOR_PPC601:
5373 rs6000_cost = &ppc601_cost;
5374 break;
5376 case PROCESSOR_PPC603:
5377 rs6000_cost = &ppc603_cost;
5378 break;
5380 case PROCESSOR_PPC604:
5381 rs6000_cost = &ppc604_cost;
5382 break;
5384 case PROCESSOR_PPC604e:
5385 rs6000_cost = &ppc604e_cost;
5386 break;
5388 case PROCESSOR_PPC620:
5389 rs6000_cost = &ppc620_cost;
5390 break;
5392 case PROCESSOR_PPC630:
5393 rs6000_cost = &ppc630_cost;
5394 break;
5396 case PROCESSOR_CELL:
5397 rs6000_cost = &ppccell_cost;
5398 break;
5400 case PROCESSOR_PPC750:
5401 case PROCESSOR_PPC7400:
5402 rs6000_cost = &ppc750_cost;
5403 break;
5405 case PROCESSOR_PPC7450:
5406 rs6000_cost = &ppc7450_cost;
5407 break;
5409 case PROCESSOR_PPC8540:
5410 case PROCESSOR_PPC8548:
5411 rs6000_cost = &ppc8540_cost;
5412 break;
5414 case PROCESSOR_PPCE300C2:
5415 case PROCESSOR_PPCE300C3:
5416 rs6000_cost = &ppce300c2c3_cost;
5417 break;
5419 case PROCESSOR_PPCE500MC:
5420 rs6000_cost = &ppce500mc_cost;
5421 break;
5423 case PROCESSOR_PPCE500MC64:
5424 rs6000_cost = &ppce500mc64_cost;
5425 break;
5427 case PROCESSOR_PPCE5500:
5428 rs6000_cost = &ppce5500_cost;
5429 break;
5431 case PROCESSOR_PPCE6500:
5432 rs6000_cost = &ppce6500_cost;
5433 break;
5435 case PROCESSOR_TITAN:
5436 rs6000_cost = &titan_cost;
5437 break;
5439 case PROCESSOR_POWER4:
5440 case PROCESSOR_POWER5:
5441 rs6000_cost = &power4_cost;
5442 break;
5444 case PROCESSOR_POWER6:
5445 rs6000_cost = &power6_cost;
5446 break;
5448 case PROCESSOR_POWER7:
5449 rs6000_cost = &power7_cost;
5450 break;
5452 case PROCESSOR_POWER8:
5453 rs6000_cost = &power8_cost;
5454 break;
5456 case PROCESSOR_POWER9:
5457 rs6000_cost = &power9_cost;
5458 break;
5460 case PROCESSOR_PPCA2:
5461 rs6000_cost = &ppca2_cost;
5462 break;
5464 default:
5465 gcc_unreachable ();
5468 if (global_init_p)
5470 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5471 rs6000_cost->simultaneous_prefetches,
5472 global_options.x_param_values,
5473 global_options_set.x_param_values);
5474 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5475 global_options.x_param_values,
5476 global_options_set.x_param_values);
5477 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5478 rs6000_cost->cache_line_size,
5479 global_options.x_param_values,
5480 global_options_set.x_param_values);
5481 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5482 global_options.x_param_values,
5483 global_options_set.x_param_values);
5485 /* Increase loop peeling limits based on performance analysis. */
5486 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5487 global_options.x_param_values,
5488 global_options_set.x_param_values);
5489 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5490 global_options.x_param_values,
5491 global_options_set.x_param_values);
5493 /* Use the 'model' -fsched-pressure algorithm by default. */
5494 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5495 SCHED_PRESSURE_MODEL,
5496 global_options.x_param_values,
5497 global_options_set.x_param_values);
5499 /* If using typedef char *va_list, signal that
5500 __builtin_va_start (&ap, 0) can be optimized to
5501 ap = __builtin_next_arg (0). */
5502 if (DEFAULT_ABI != ABI_V4)
5503 targetm.expand_builtin_va_start = NULL;
5506 /* Set up single/double float flags.
5507 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5508 then set both flags. */
5509 if (TARGET_HARD_FLOAT && TARGET_FPRS
5510 && rs6000_single_float == 0 && rs6000_double_float == 0)
5511 rs6000_single_float = rs6000_double_float = 1;
5513 /* If not explicitly specified via option, decide whether to generate indexed
5514 load/store instructions. A value of -1 indicates that the
5515 initial value of this variable has not been overwritten. During
5516 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5517 if (TARGET_AVOID_XFORM == -1)
5518 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5519 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
5520 need indexed accesses and the type used is the scalar type of the element
5521 being loaded or stored. */
5522 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5523 && !TARGET_ALTIVEC);
5525 /* Set the -mrecip options. */
5526 if (rs6000_recip_name)
5528 char *p = ASTRDUP (rs6000_recip_name);
5529 char *q;
5530 unsigned int mask, i;
5531 bool invert;
5533 while ((q = strtok (p, ",")) != NULL)
5535 p = NULL;
5536 if (*q == '!')
5538 invert = true;
5539 q++;
5541 else
5542 invert = false;
5544 if (!strcmp (q, "default"))
5545 mask = ((TARGET_RECIP_PRECISION)
5546 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5547 else
5549 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5550 if (!strcmp (q, recip_options[i].string))
5552 mask = recip_options[i].mask;
5553 break;
5556 if (i == ARRAY_SIZE (recip_options))
5558 error ("unknown option for -mrecip=%s", q);
5559 invert = false;
5560 mask = 0;
5561 ret = false;
5565 if (invert)
5566 rs6000_recip_control &= ~mask;
5567 else
5568 rs6000_recip_control |= mask;
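/* Illustrative example (assuming the recip_options table contains the
   entries documented for -mrecip, e.g. "div" and "rsqrtd"): a string
   such as

       -mrecip=all,!rsqrtd

   is parsed token by token, first setting every estimate bit for "all"
   and then clearing the bit named by the inverted "!rsqrtd" entry.  */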
5572 /* Set the builtin mask of the various options used that could affect which
5573 builtins were used. In the past we used target_flags, but we've run out
5574 of bits, and some options like SPE and PAIRED are no longer in
5575 target_flags. */
5576 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5577 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5578 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5579 rs6000_builtin_mask);
5581 /* Initialize all of the registers. */
5582 rs6000_init_hard_regno_mode_ok (global_init_p);
5584 /* Save the initial options in case the user uses function-specific options. */
5585 if (global_init_p)
5586 target_option_default_node = target_option_current_node
5587 = build_target_option_node (&global_options);
5589 /* If not explicitly specified via option, decide whether to generate the
5590 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5591 if (TARGET_LINK_STACK == -1)
5592 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5594 return ret;
5597 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5598 define the target cpu type. */
5600 static void
5601 rs6000_option_override (void)
5603 (void) rs6000_option_override_internal (true);
5607 /* Implement targetm.vectorize.builtin_mask_for_load. */
5608 static tree
5609 rs6000_builtin_mask_for_load (void)
5611 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5612 if ((TARGET_ALTIVEC && !TARGET_VSX)
5613 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5614 return altivec_builtin_mask_for_load;
5615 else
5616 return 0;
5619 /* Implement LOOP_ALIGN. */
5621 rs6000_loop_align (rtx label)
5623 basic_block bb;
5624 int ninsns;
5626 /* Don't override loop alignment if -falign-loops was specified. */
5627 if (!can_override_loop_align)
5628 return align_loops_log;
5630 bb = BLOCK_FOR_INSN (label);
5631 ninsns = num_loop_insns(bb->loop_father);
5633 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5634 if (ninsns > 4 && ninsns <= 8
5635 && (rs6000_cpu == PROCESSOR_POWER4
5636 || rs6000_cpu == PROCESSOR_POWER5
5637 || rs6000_cpu == PROCESSOR_POWER6
5638 || rs6000_cpu == PROCESSOR_POWER7
5639 || rs6000_cpu == PROCESSOR_POWER8
5640 || rs6000_cpu == PROCESSOR_POWER9))
5641 return 5;
5642 else
5643 return align_loops_log;
5646 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5647 static int
5648 rs6000_loop_align_max_skip (rtx_insn *label)
5650 return (1 << rs6000_loop_align (label)) - 1;
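/* Worked example: for a loop of six insns on a POWER8 target,
   rs6000_loop_align returns 5, i.e. a 2**5 = 32-byte alignment, and
   rs6000_loop_align_max_skip then permits up to (1 << 5) - 1 = 31
   padding bytes to reach that boundary.  */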
5653 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5654 after applying N iterations. This routine does not determine
5655 how many iterations are required to reach the desired alignment. */
5657 static bool
5658 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5660 if (is_packed)
5661 return false;
5663 if (TARGET_32BIT)
5665 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5666 return true;
5668 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5669 return true;
5671 return false;
5673 else
5675 if (TARGET_MACHO)
5676 return false;
5678 /* Assume that all other types are naturally aligned. CHECKME! */
5679 return true;
5683 /* Return true if the vector misalignment factor is supported by the
5684 target. */
5685 static bool
5686 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5687 const_tree type,
5688 int misalignment,
5689 bool is_packed)
5691 if (TARGET_VSX)
5693 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5694 return true;
5696 /* Return false if the movmisalign pattern is not supported for this mode. */
5697 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5698 return false;
5700 if (misalignment == -1)
5702 /* Misalignment factor is unknown at compile time but we know
5703 it's word aligned. */
5704 if (rs6000_vector_alignment_reachable (type, is_packed))
5706 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5708 if (element_size == 64 || element_size == 32)
5709 return true;
5712 return false;
5715 /* VSX supports word-aligned vectors. */
5716 if (misalignment % 4 == 0)
5717 return true;
5719 return false;
5722 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5723 static int
5724 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5725 tree vectype, int misalign)
5727 unsigned elements;
5728 tree elem_type;
5730 switch (type_of_cost)
5732 case scalar_stmt:
5733 case scalar_load:
5734 case scalar_store:
5735 case vector_stmt:
5736 case vector_load:
5737 case vector_store:
5738 case vec_to_scalar:
5739 case scalar_to_vec:
5740 case cond_branch_not_taken:
5741 return 1;
5743 case vec_perm:
5744 if (TARGET_VSX)
5745 return 3;
5746 else
5747 return 1;
5749 case vec_promote_demote:
5750 if (TARGET_VSX)
5751 return 4;
5752 else
5753 return 1;
5755 case cond_branch_taken:
5756 return 3;
5758 case unaligned_load:
5759 if (TARGET_P9_VECTOR)
5760 return 3;
5762 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5763 return 1;
5765 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5767 elements = TYPE_VECTOR_SUBPARTS (vectype);
5768 if (elements == 2)
5769 /* Double word aligned. */
5770 return 2;
5772 if (elements == 4)
5774 switch (misalign)
5776 case 8:
5777 /* Double word aligned. */
5778 return 2;
5780 case -1:
5781 /* Unknown misalignment. */
5782 case 4:
5783 case 12:
5784 /* Word aligned. */
5785 return 22;
5787 default:
5788 gcc_unreachable ();
5793 if (TARGET_ALTIVEC)
5794 /* Misaligned loads are not supported. */
5795 gcc_unreachable ();
5797 return 2;
5799 case unaligned_store:
5800 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5801 return 1;
5803 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5805 elements = TYPE_VECTOR_SUBPARTS (vectype);
5806 if (elements == 2)
5807 /* Double word aligned. */
5808 return 2;
5810 if (elements == 4)
5812 switch (misalign)
5814 case 8:
5815 /* Double word aligned. */
5816 return 2;
5818 case -1:
5819 /* Unknown misalignment. */
5820 case 4:
5821 case 12:
5822 /* Word aligned. */
5823 return 23;
5825 default:
5826 gcc_unreachable ();
5831 if (TARGET_ALTIVEC)
5832 /* Misaligned stores are not supported. */
5833 gcc_unreachable ();
5835 return 2;
5837 case vec_construct:
5838 /* This is a rough approximation assuming non-constant elements
5839 constructed into a vector via element insertion. FIXME:
5840 vec_construct is not granular enough for uniformly good
5841 decisions. If the initialization is a splat, this is
5842 cheaper than we estimate. Improve this someday. */
5843 elem_type = TREE_TYPE (vectype);
5844 /* 32-bit vectors loaded into registers are stored as double
5845 precision, so we need 2 permutes, 2 converts, and 1 merge
5846 to construct a vector of short floats from them. */
5847 if (SCALAR_FLOAT_TYPE_P (elem_type)
5848 && TYPE_PRECISION (elem_type) == 32)
5849 return 5;
5850 /* On POWER9, integer vector types are built up in GPRs and then
5851 use a direct move (2 cycles). For POWER8 this is even worse,
5852 as we need two direct moves and a merge, and the direct moves
5853 are five cycles. */
5854 else if (INTEGRAL_TYPE_P (elem_type))
5856 if (TARGET_P9_VECTOR)
5857 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5858 else
5859 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 11;
5861 else
5862 /* V2DFmode doesn't need a direct move. */
5863 return 2;
5865 default:
5866 gcc_unreachable ();
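/* Worked example: without ISA 3.0 or efficient unaligned VSX, but with
   VSX and -mallow-movmisalign, an unaligned_load of a V4SF vector costs
   2 when the misalignment is 8 (double-word aligned) but 22 when it is
   4, 12 or unknown (-1), strongly biasing the vectorizer toward
   double-word-aligned accesses.  */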
5870 /* Implement targetm.vectorize.preferred_simd_mode. */
5872 static machine_mode
5873 rs6000_preferred_simd_mode (machine_mode mode)
5875 if (TARGET_VSX)
5876 switch (mode)
5878 case DFmode:
5879 return V2DFmode;
5880 default:;
5882 if (TARGET_ALTIVEC || TARGET_VSX)
5883 switch (mode)
5885 case SFmode:
5886 return V4SFmode;
5887 case TImode:
5888 return V1TImode;
5889 case DImode:
5890 return V2DImode;
5891 case SImode:
5892 return V4SImode;
5893 case HImode:
5894 return V8HImode;
5895 case QImode:
5896 return V16QImode;
5897 default:;
5899 if (TARGET_SPE)
5900 switch (mode)
5902 case SFmode:
5903 return V2SFmode;
5904 case SImode:
5905 return V2SImode;
5906 default:;
5908 if (TARGET_PAIRED_FLOAT
5909 && mode == SFmode)
5910 return V2SFmode;
5911 return word_mode;
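/* Illustrative mapping implied by the code above: with VSX, DFmode
   vectorizes as V2DFmode; with AltiVec or VSX, SFmode -> V4SFmode and
   SImode -> V4SImode; SPE uses the 64-bit V2SFmode/V2SImode pairs and
   PAIRED uses V2SFmode; anything else stays in word_mode, i.e. is not
   vectorized.  */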
5914 typedef struct _rs6000_cost_data
5916 struct loop *loop_info;
5917 unsigned cost[3];
5918 } rs6000_cost_data;
5920 /* Test for likely overcommitment of vector hardware resources. If a
5921 loop iteration is relatively large, and too large a percentage of
5922 instructions in the loop are vectorized, the cost model may not
5923 adequately reflect delays from unavailable vector resources.
5924 Penalize the loop body cost for this case. */
5926 static void
5927 rs6000_density_test (rs6000_cost_data *data)
5929 const int DENSITY_PCT_THRESHOLD = 85;
5930 const int DENSITY_SIZE_THRESHOLD = 70;
5931 const int DENSITY_PENALTY = 10;
5932 struct loop *loop = data->loop_info;
5933 basic_block *bbs = get_loop_body (loop);
5934 int nbbs = loop->num_nodes;
5935 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5936 int i, density_pct;
5938 for (i = 0; i < nbbs; i++)
5940 basic_block bb = bbs[i];
5941 gimple_stmt_iterator gsi;
5943 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5945 gimple *stmt = gsi_stmt (gsi);
5946 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5948 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5949 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5950 not_vec_cost++;
5954 free (bbs);
5955 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5957 if (density_pct > DENSITY_PCT_THRESHOLD
5958 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5960 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5961 if (dump_enabled_p ())
5962 dump_printf_loc (MSG_NOTE, vect_location,
5963 "density %d%%, cost %d exceeds threshold, penalizing "
5964 "loop body cost by %d%%", density_pct,
5965 vec_cost + not_vec_cost, DENSITY_PENALTY);
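/* Worked example with illustrative numbers: for vec_cost = 160 and
   not_vec_cost = 20, density_pct = (160 * 100) / 180 = 88 > 85 and the
   loop size 160 + 20 = 180 > 70, so the vect_body cost is penalized to
   160 * (100 + 10) / 100 = 176.  */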
5969 /* Implement targetm.vectorize.init_cost. */
5971 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5972 instruction is needed by the vectorization. */
5973 static bool rs6000_vect_nonmem;
5975 static void *
5976 rs6000_init_cost (struct loop *loop_info)
5978 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5979 data->loop_info = loop_info;
5980 data->cost[vect_prologue] = 0;
5981 data->cost[vect_body] = 0;
5982 data->cost[vect_epilogue] = 0;
5983 rs6000_vect_nonmem = false;
5984 return data;
5987 /* Implement targetm.vectorize.add_stmt_cost. */
5989 static unsigned
5990 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5991 struct _stmt_vec_info *stmt_info, int misalign,
5992 enum vect_cost_model_location where)
5994 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5995 unsigned retval = 0;
5997 if (flag_vect_cost_model)
5999 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
6000 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
6001 misalign);
6002 /* Statements in an inner loop relative to the loop being
6003 vectorized are weighted more heavily. The value here is
6004 arbitrary and could potentially be improved with analysis. */
6005 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
6006 count *= 50; /* FIXME. */
6008 retval = (unsigned) (count * stmt_cost);
6009 cost_data->cost[where] += retval;
6011 /* Check whether we're doing something other than just a copy loop.
6012 Not all such loops may be profitably vectorized; see
6013 rs6000_finish_cost. */
6014 if ((kind == vec_to_scalar || kind == vec_perm
6015 || kind == vec_promote_demote || kind == vec_construct
6016 || kind == scalar_to_vec)
6017 || (where == vect_body && kind == vector_stmt))
6018 rs6000_vect_nonmem = true;
6021 return retval;
6024 /* Implement targetm.vectorize.finish_cost. */
6026 static void
6027 rs6000_finish_cost (void *data, unsigned *prologue_cost,
6028 unsigned *body_cost, unsigned *epilogue_cost)
6030 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
6032 if (cost_data->loop_info)
6033 rs6000_density_test (cost_data);
6035 /* Don't vectorize minimum-vectorization-factor, simple copy loops
6036 that require versioning for any reason. The vectorization is at
6037 best a wash inside the loop, and the versioning checks make
6038 profitability highly unlikely and potentially quite harmful. */
6039 if (cost_data->loop_info)
6041 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
6042 if (!rs6000_vect_nonmem
6043 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
6044 && LOOP_REQUIRES_VERSIONING (vec_info))
6045 cost_data->cost[vect_body] += 10000;
6048 *prologue_cost = cost_data->cost[vect_prologue];
6049 *body_cost = cost_data->cost[vect_body];
6050 *epilogue_cost = cost_data->cost[vect_epilogue];
6053 /* Implement targetm.vectorize.destroy_cost_data. */
6055 static void
6056 rs6000_destroy_cost_data (void *data)
6058 free (data);
6061 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
6062 library with vectorized intrinsics. */
6064 static tree
6065 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
6066 tree type_in)
6068 char name[32];
6069 const char *suffix = NULL;
6070 tree fntype, new_fndecl, bdecl = NULL_TREE;
6071 int n_args = 1;
6072 const char *bname;
6073 machine_mode el_mode, in_mode;
6074 int n, in_n;
6076 /* Libmass is suitable for unsafe math only as it does not correctly support
6077 parts of IEEE with the required precision such as denormals. Only support
6078 it if we have VSX to use the simd d2 or f4 functions.
6079 XXX: Add variable length support. */
6080 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
6081 return NULL_TREE;
6083 el_mode = TYPE_MODE (TREE_TYPE (type_out));
6084 n = TYPE_VECTOR_SUBPARTS (type_out);
6085 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6086 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6087 if (el_mode != in_mode
6088 || n != in_n)
6089 return NULL_TREE;
6091 switch (fn)
6093 CASE_CFN_ATAN2:
6094 CASE_CFN_HYPOT:
6095 CASE_CFN_POW:
6096 n_args = 2;
6097 gcc_fallthrough ();
6099 CASE_CFN_ACOS:
6100 CASE_CFN_ACOSH:
6101 CASE_CFN_ASIN:
6102 CASE_CFN_ASINH:
6103 CASE_CFN_ATAN:
6104 CASE_CFN_ATANH:
6105 CASE_CFN_CBRT:
6106 CASE_CFN_COS:
6107 CASE_CFN_COSH:
6108 CASE_CFN_ERF:
6109 CASE_CFN_ERFC:
6110 CASE_CFN_EXP2:
6111 CASE_CFN_EXP:
6112 CASE_CFN_EXPM1:
6113 CASE_CFN_LGAMMA:
6114 CASE_CFN_LOG10:
6115 CASE_CFN_LOG1P:
6116 CASE_CFN_LOG2:
6117 CASE_CFN_LOG:
6118 CASE_CFN_SIN:
6119 CASE_CFN_SINH:
6120 CASE_CFN_SQRT:
6121 CASE_CFN_TAN:
6122 CASE_CFN_TANH:
6123 if (el_mode == DFmode && n == 2)
6125 bdecl = mathfn_built_in (double_type_node, fn);
6126 suffix = "d2"; /* pow -> powd2 */
6128 else if (el_mode == SFmode && n == 4)
6130 bdecl = mathfn_built_in (float_type_node, fn);
6131 suffix = "4"; /* powf -> powf4 */
6133 else
6134 return NULL_TREE;
6135 if (!bdecl)
6136 return NULL_TREE;
6137 break;
6139 default:
6140 return NULL_TREE;
6143 gcc_assert (suffix != NULL);
6144 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
6145 if (!bname)
6146 return NULL_TREE;
6148 strcpy (name, bname + sizeof ("__builtin_") - 1);
6149 strcat (name, suffix);
6151 if (n_args == 1)
6152 fntype = build_function_type_list (type_out, type_in, NULL);
6153 else if (n_args == 2)
6154 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
6155 else
6156 gcc_unreachable ();
6158 /* Build a function declaration for the vectorized function. */
6159 new_fndecl = build_decl (BUILTINS_LOCATION,
6160 FUNCTION_DECL, get_identifier (name), fntype);
6161 TREE_PUBLIC (new_fndecl) = 1;
6162 DECL_EXTERNAL (new_fndecl) = 1;
6163 DECL_IS_NOVOPS (new_fndecl) = 1;
6164 TREE_READONLY (new_fndecl) = 1;
6166 return new_fndecl;
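/* Illustrative example: for CFN_POW with V2DFmode in and out, the code
   above takes the "__builtin_pow" decl, strips the "__builtin_" prefix
   and appends "d2", declaring an external two-argument "powd2" that
   operates on V2DF; the V4SF variant would likewise become "powf4".  */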
6169 /* Returns a function decl for a vectorized version of the builtin function
6170 with builtin function code FN and the result vector type TYPE, or NULL_TREE
6171 if it is not available. */
6173 static tree
6174 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
6175 tree type_in)
6177 machine_mode in_mode, out_mode;
6178 int in_n, out_n;
6180 if (TARGET_DEBUG_BUILTIN)
6181 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
6182 combined_fn_name (combined_fn (fn)),
6183 GET_MODE_NAME (TYPE_MODE (type_out)),
6184 GET_MODE_NAME (TYPE_MODE (type_in)));
6186 if (TREE_CODE (type_out) != VECTOR_TYPE
6187 || TREE_CODE (type_in) != VECTOR_TYPE
6188 || !TARGET_VECTORIZE_BUILTINS)
6189 return NULL_TREE;
6191 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6192 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6193 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6194 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6196 switch (fn)
6198 CASE_CFN_COPYSIGN:
6199 if (VECTOR_UNIT_VSX_P (V2DFmode)
6200 && out_mode == DFmode && out_n == 2
6201 && in_mode == DFmode && in_n == 2)
6202 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
6203 if (VECTOR_UNIT_VSX_P (V4SFmode)
6204 && out_mode == SFmode && out_n == 4
6205 && in_mode == SFmode && in_n == 4)
6206 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
6207 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6208 && out_mode == SFmode && out_n == 4
6209 && in_mode == SFmode && in_n == 4)
6210 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
6211 break;
6212 CASE_CFN_CEIL:
6213 if (VECTOR_UNIT_VSX_P (V2DFmode)
6214 && out_mode == DFmode && out_n == 2
6215 && in_mode == DFmode && in_n == 2)
6216 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
6217 if (VECTOR_UNIT_VSX_P (V4SFmode)
6218 && out_mode == SFmode && out_n == 4
6219 && in_mode == SFmode && in_n == 4)
6220 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
6221 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6222 && out_mode == SFmode && out_n == 4
6223 && in_mode == SFmode && in_n == 4)
6224 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
6225 break;
6226 CASE_CFN_FLOOR:
6227 if (VECTOR_UNIT_VSX_P (V2DFmode)
6228 && out_mode == DFmode && out_n == 2
6229 && in_mode == DFmode && in_n == 2)
6230 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
6231 if (VECTOR_UNIT_VSX_P (V4SFmode)
6232 && out_mode == SFmode && out_n == 4
6233 && in_mode == SFmode && in_n == 4)
6234 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
6235 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6236 && out_mode == SFmode && out_n == 4
6237 && in_mode == SFmode && in_n == 4)
6238 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
6239 break;
6240 CASE_CFN_FMA:
6241 if (VECTOR_UNIT_VSX_P (V2DFmode)
6242 && out_mode == DFmode && out_n == 2
6243 && in_mode == DFmode && in_n == 2)
6244 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
6245 if (VECTOR_UNIT_VSX_P (V4SFmode)
6246 && out_mode == SFmode && out_n == 4
6247 && in_mode == SFmode && in_n == 4)
6248 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
6249 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6250 && out_mode == SFmode && out_n == 4
6251 && in_mode == SFmode && in_n == 4)
6252 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
6253 break;
6254 CASE_CFN_TRUNC:
6255 if (VECTOR_UNIT_VSX_P (V2DFmode)
6256 && out_mode == DFmode && out_n == 2
6257 && in_mode == DFmode && in_n == 2)
6258 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
6259 if (VECTOR_UNIT_VSX_P (V4SFmode)
6260 && out_mode == SFmode && out_n == 4
6261 && in_mode == SFmode && in_n == 4)
6262 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
6263 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6264 && out_mode == SFmode && out_n == 4
6265 && in_mode == SFmode && in_n == 4)
6266 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
6267 break;
6268 CASE_CFN_NEARBYINT:
6269 if (VECTOR_UNIT_VSX_P (V2DFmode)
6270 && flag_unsafe_math_optimizations
6271 && out_mode == DFmode && out_n == 2
6272 && in_mode == DFmode && in_n == 2)
6273 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
6274 if (VECTOR_UNIT_VSX_P (V4SFmode)
6275 && flag_unsafe_math_optimizations
6276 && out_mode == SFmode && out_n == 4
6277 && in_mode == SFmode && in_n == 4)
6278 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
6279 break;
6280 CASE_CFN_RINT:
6281 if (VECTOR_UNIT_VSX_P (V2DFmode)
6282 && !flag_trapping_math
6283 && out_mode == DFmode && out_n == 2
6284 && in_mode == DFmode && in_n == 2)
6285 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
6286 if (VECTOR_UNIT_VSX_P (V4SFmode)
6287 && !flag_trapping_math
6288 && out_mode == SFmode && out_n == 4
6289 && in_mode == SFmode && in_n == 4)
6290 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
6291 break;
6292 default:
6293 break;
6296 /* Generate calls to libmass if appropriate. */
6297 if (rs6000_veclib_handler)
6298 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6300 return NULL_TREE;
6303 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6305 static tree
6306 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6307 tree type_in)
6309 machine_mode in_mode, out_mode;
6310 int in_n, out_n;
6312 if (TARGET_DEBUG_BUILTIN)
6313 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6314 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6315 GET_MODE_NAME (TYPE_MODE (type_out)),
6316 GET_MODE_NAME (TYPE_MODE (type_in)));
6318 if (TREE_CODE (type_out) != VECTOR_TYPE
6319 || TREE_CODE (type_in) != VECTOR_TYPE
6320 || !TARGET_VECTORIZE_BUILTINS)
6321 return NULL_TREE;
6323 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6324 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6325 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6326 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6328 enum rs6000_builtins fn
6329 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6330 switch (fn)
6332 case RS6000_BUILTIN_RSQRTF:
6333 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6334 && out_mode == SFmode && out_n == 4
6335 && in_mode == SFmode && in_n == 4)
6336 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6337 break;
6338 case RS6000_BUILTIN_RSQRT:
6339 if (VECTOR_UNIT_VSX_P (V2DFmode)
6340 && out_mode == DFmode && out_n == 2
6341 && in_mode == DFmode && in_n == 2)
6342 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6343 break;
6344 case RS6000_BUILTIN_RECIPF:
6345 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6346 && out_mode == SFmode && out_n == 4
6347 && in_mode == SFmode && in_n == 4)
6348 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6349 break;
6350 case RS6000_BUILTIN_RECIP:
6351 if (VECTOR_UNIT_VSX_P (V2DFmode)
6352 && out_mode == DFmode && out_n == 2
6353 && in_mode == DFmode && in_n == 2)
6354 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6355 break;
6356 default:
6357 break;
6359 return NULL_TREE;
6362 /* Default CPU string for rs6000*_file_start functions. */
6363 static const char *rs6000_default_cpu;
6365 /* Do anything needed at the start of the asm file. */
6367 static void
6368 rs6000_file_start (void)
6370 char buffer[80];
6371 const char *start = buffer;
6372 FILE *file = asm_out_file;
6374 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6376 default_file_start ();
6378 if (flag_verbose_asm)
6380 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6382 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6384 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6385 start = "";
6388 if (global_options_set.x_rs6000_cpu_index)
6390 fprintf (file, "%s -mcpu=%s", start,
6391 processor_target_table[rs6000_cpu_index].name);
6392 start = "";
6395 if (global_options_set.x_rs6000_tune_index)
6397 fprintf (file, "%s -mtune=%s", start,
6398 processor_target_table[rs6000_tune_index].name);
6399 start = "";
6402 if (PPC405_ERRATUM77)
6404 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6405 start = "";
6408 #ifdef USING_ELFOS_H
6409 switch (rs6000_sdata)
6411 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6412 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6413 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6414 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6417 if (rs6000_sdata && g_switch_value)
6419 fprintf (file, "%s -G %d", start,
6420 g_switch_value);
6421 start = "";
6423 #endif
6425 if (*start == '\0')
6426 putc ('\n', file);
6429 #ifdef USING_ELFOS_H
6430 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6431 && !global_options_set.x_rs6000_cpu_index)
6433 fputs ("\t.machine ", asm_out_file);
6434 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6435 fputs ("power9\n", asm_out_file);
6436 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6437 fputs ("power8\n", asm_out_file);
6438 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6439 fputs ("power7\n", asm_out_file);
6440 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6441 fputs ("power6\n", asm_out_file);
6442 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6443 fputs ("power5\n", asm_out_file);
6444 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6445 fputs ("power4\n", asm_out_file);
6446 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6447 fputs ("ppc64\n", asm_out_file);
6448 else
6449 fputs ("ppc\n", asm_out_file);
6451 #endif
6453 if (DEFAULT_ABI == ABI_ELFv2)
6454 fprintf (file, "\t.abiversion 2\n");
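/* Illustrative example of the resulting preamble for an ELFv2 compile
   with ISA 3.0 flags, assuming no configured default cpu and no
   explicit -mcpu=:

       .machine power9
       .abiversion 2
*/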
6458 /* Return nonzero if this function is known to have a null epilogue. */
6461 direct_return (void)
6463 if (reload_completed)
6465 rs6000_stack_t *info = rs6000_stack_info ();
6467 if (info->first_gp_reg_save == 32
6468 && info->first_fp_reg_save == 64
6469 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6470 && ! info->lr_save_p
6471 && ! info->cr_save_p
6472 && info->vrsave_size == 0
6473 && ! info->push_p)
6474 return 1;
6477 return 0;
6480 /* Return the number of instructions it takes to form a constant in an
6481 integer register. */
6484 num_insns_constant_wide (HOST_WIDE_INT value)
6486 /* signed constant loadable with addi */
6487 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6488 return 1;
6490 /* constant loadable with addis */
6491 else if ((value & 0xffff) == 0
6492 && (value >> 31 == -1 || value >> 31 == 0))
6493 return 1;
6495 else if (TARGET_POWERPC64)
6497 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6498 HOST_WIDE_INT high = value >> 31;
6500 if (high == 0 || high == -1)
6501 return 2;
6503 high >>= 1;
6505 if (low == 0)
6506 return num_insns_constant_wide (high) + 1;
6507 else if (high == 0)
6508 return num_insns_constant_wide (low) + 1;
6509 else
6510 return (num_insns_constant_wide (high)
6511 + num_insns_constant_wide (low) + 1);
6514 else
6515 return 2;
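/* Worked examples: 0x7654 loads with a single addi and 0x76540000 with
   a single addis; a 32-bit value such as 0x76543210 needs 2 insns
   (addis + ori); and a 64-bit constant whose two halves are both
   nontrivial costs insns (high) + insns (low) + 1 to combine the two
   halves, typically 5 insns on TARGET_POWERPC64.  */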
6519 num_insns_constant (rtx op, machine_mode mode)
6521 HOST_WIDE_INT low, high;
6523 switch (GET_CODE (op))
6525 case CONST_INT:
6526 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6527 && rs6000_is_valid_and_mask (op, mode))
6528 return 2;
6529 else
6530 return num_insns_constant_wide (INTVAL (op));
6532 case CONST_WIDE_INT:
6534 int i;
6535 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6536 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6537 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6538 return ins;
6541 case CONST_DOUBLE:
6542 if (mode == SFmode || mode == SDmode)
6544 long l;
6546 if (DECIMAL_FLOAT_MODE_P (mode))
6547 REAL_VALUE_TO_TARGET_DECIMAL32
6548 (*CONST_DOUBLE_REAL_VALUE (op), l);
6549 else
6550 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6551 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6554 long l[2];
6555 if (DECIMAL_FLOAT_MODE_P (mode))
6556 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6557 else
6558 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6559 high = l[WORDS_BIG_ENDIAN == 0];
6560 low = l[WORDS_BIG_ENDIAN != 0];
6562 if (TARGET_32BIT)
6563 return (num_insns_constant_wide (low)
6564 + num_insns_constant_wide (high));
6565 else
6567 if ((high == 0 && low >= 0)
6568 || (high == -1 && low < 0))
6569 return num_insns_constant_wide (low);
6571 else if (rs6000_is_valid_and_mask (op, mode))
6572 return 2;
6574 else if (low == 0)
6575 return num_insns_constant_wide (high) + 1;
6577 else
6578 return (num_insns_constant_wide (high)
6579 + num_insns_constant_wide (low) + 1);
6582 default:
6583 gcc_unreachable ();
6587 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6588 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6589 corresponding element of the vector, but for V4SFmode and V2SFmode,
6590 the corresponding "float" is interpreted as an SImode integer. */
6592 HOST_WIDE_INT
6593 const_vector_elt_as_int (rtx op, unsigned int elt)
6595 rtx tmp;
6597 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6598 gcc_assert (GET_MODE (op) != V2DImode
6599 && GET_MODE (op) != V2DFmode);
6601 tmp = CONST_VECTOR_ELT (op, elt);
6602 if (GET_MODE (op) == V4SFmode
6603 || GET_MODE (op) == V2SFmode)
6604 tmp = gen_lowpart (SImode, tmp);
6605 return INTVAL (tmp);
6608 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6609 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6610 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6611 all items are set to the same value and contain COPIES replicas of the
6612 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6613 operand and the others are set to the value of the operand's msb. */
6615 static bool
6616 vspltis_constant (rtx op, unsigned step, unsigned copies)
6618 machine_mode mode = GET_MODE (op);
6619 machine_mode inner = GET_MODE_INNER (mode);
6621 unsigned i;
6622 unsigned nunits;
6623 unsigned bitsize;
6624 unsigned mask;
6626 HOST_WIDE_INT val;
6627 HOST_WIDE_INT splat_val;
6628 HOST_WIDE_INT msb_val;
6630 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6631 return false;
6633 nunits = GET_MODE_NUNITS (mode);
6634 bitsize = GET_MODE_BITSIZE (inner);
6635 mask = GET_MODE_MASK (inner);
6637 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6638 splat_val = val;
6639 msb_val = val >= 0 ? 0 : -1;
6641 /* Construct the value to be splatted, if possible. If not, return 0. */
6642 for (i = 2; i <= copies; i *= 2)
6644 HOST_WIDE_INT small_val;
6645 bitsize /= 2;
6646 small_val = splat_val >> bitsize;
6647 mask >>= bitsize;
6648 if (splat_val != ((HOST_WIDE_INT)
6649 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6650 | (small_val & mask)))
6651 return false;
6652 splat_val = small_val;
6655 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6656 if (EASY_VECTOR_15 (splat_val))
6659 /* Also check if we can splat, and then add the result to itself. Do so if
6660 the value is positive, or if the splat instruction is using OP's mode;
6661 for splat_val < 0, the splat and the add should use the same mode. */
6662 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6663 && (splat_val >= 0 || (step == 1 && copies == 1)))
6666 /* Also check if we are loading up the most significant bit, which can be done by
6667 loading up -1 and shifting the value left by -1. */
6668 else if (EASY_VECTOR_MSB (splat_val, inner))
6671 else
6672 return false;
6674 /* Check if VAL is present in every STEP-th element, and the
6675 other elements are filled with its most significant bit. */
6676 for (i = 1; i < nunits; ++i)
6678 HOST_WIDE_INT desired_val;
6679 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6680 if ((i & (step - 1)) == 0)
6681 desired_val = val;
6682 else
6683 desired_val = msb_val;
6685 if (desired_val != const_vector_elt_as_int (op, elt))
6686 return false;
6689 return true;
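/* Illustrative examples: a V4SImode vector of all 5s satisfies
   EASY_VECTOR_15 and is a single vspltisw 5; a vector of all 24s
   satisfies EASY_VECTOR_15_ADD_SELF and can be built as vspltisw 12
   followed by an add of the result to itself; the step/copies walk
   above matches the analogous V8HImode and V16QImode layouts.  */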
6692 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6693 instruction, filling in the bottom elements with 0 or -1.
6695 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6696 for the number of zeroes to shift in, or negative for the number of 0xff
6697 bytes to shift in.
6699 OP is a CONST_VECTOR. */
6702 vspltis_shifted (rtx op)
6704 machine_mode mode = GET_MODE (op);
6705 machine_mode inner = GET_MODE_INNER (mode);
6707 unsigned i, j;
6708 unsigned nunits;
6709 unsigned mask;
6711 HOST_WIDE_INT val;
6713 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6714 return false;
6716 /* We need to create pseudo registers to do the shift, so don't recognize
6717 shift vector constants after reload. */
6718 if (!can_create_pseudo_p ())
6719 return false;
6721 nunits = GET_MODE_NUNITS (mode);
6722 mask = GET_MODE_MASK (inner);
6724 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6726 /* Check if the value can really be the operand of a vspltis[bhw]. */
6727 if (EASY_VECTOR_15 (val))
6730 /* Also check if we are loading up the most significant bit which can be done
6731 by loading up -1 and shifting the value left by -1. */
6732 else if (EASY_VECTOR_MSB (val, inner))
6735 else
6736 return 0;
6738 /* Check if VAL is present in every STEP-th element until we find elements
6739 that are 0 or all 1 bits. */
6740 for (i = 1; i < nunits; ++i)
6742 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6743 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6745 /* If the value isn't the splat value, check for the remaining elements
6746 being 0/-1. */
6747 if (val != elt_val)
6749 if (elt_val == 0)
6751 for (j = i+1; j < nunits; ++j)
6753 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6754 if (const_vector_elt_as_int (op, elt2) != 0)
6755 return 0;
6758 return (nunits - i) * GET_MODE_SIZE (inner);
6761 else if ((elt_val & mask) == mask)
6763 for (j = i+1; j < nunits; ++j)
6765 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6766 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6767 return 0;
6770 return -((nunits - i) * GET_MODE_SIZE (inner));
6773 else
6774 return 0;
6778 /* If all elements are equal, we don't need to do VSLDOI. */
6779 return 0;
6783 /* Return true if OP is of the given MODE and can be synthesized
6784 with a vspltisb, vspltish or vspltisw. */
6786 bool
6787 easy_altivec_constant (rtx op, machine_mode mode)
6789 unsigned step, copies;
6791 if (mode == VOIDmode)
6792 mode = GET_MODE (op);
6793 else if (mode != GET_MODE (op))
6794 return false;
6796 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6797 constants. */
6798 if (mode == V2DFmode)
6799 return zero_constant (op, mode);
6801 else if (mode == V2DImode)
6803 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6804 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6805 return false;
6807 if (zero_constant (op, mode))
6808 return true;
6810 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6811 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6812 return true;
6814 return false;
6817 /* V1TImode is a special container for TImode. Ignore for now. */
6818 else if (mode == V1TImode)
6819 return false;
6821 /* Start with a vspltisw. */
6822 step = GET_MODE_NUNITS (mode) / 4;
6823 copies = 1;
6825 if (vspltis_constant (op, step, copies))
6826 return true;
6828 /* Then try with a vspltish. */
6829 if (step == 1)
6830 copies <<= 1;
6831 else
6832 step >>= 1;
6834 if (vspltis_constant (op, step, copies))
6835 return true;
6837 /* And finally a vspltisb. */
6838 if (step == 1)
6839 copies <<= 1;
6840 else
6841 step >>= 1;
6843 if (vspltis_constant (op, step, copies))
6844 return true;
6846 if (vspltis_shifted (op) != 0)
6847 return true;
6849 return false;
6852 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6853 result is OP. Abort if it is not possible. */
6856 gen_easy_altivec_constant (rtx op)
6858 machine_mode mode = GET_MODE (op);
6859 int nunits = GET_MODE_NUNITS (mode);
6860 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6861 unsigned step = nunits / 4;
6862 unsigned copies = 1;
6864 /* Start with a vspltisw. */
6865 if (vspltis_constant (op, step, copies))
6866 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6868 /* Then try with a vspltish. */
6869 if (step == 1)
6870 copies <<= 1;
6871 else
6872 step >>= 1;
6874 if (vspltis_constant (op, step, copies))
6875 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6877 /* And finally a vspltisb. */
6878 if (step == 1)
6879 copies <<= 1;
6880 else
6881 step >>= 1;
6883 if (vspltis_constant (op, step, copies))
6884 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6886 gcc_unreachable ();
6889 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6890 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6892 Store the number of instructions needed (1 or 2) in the location
6893 pointed to by NUM_INSNS_PTR.
6895 Store the constant that is being split via CONSTANT_PTR. */
6897 bool
6898 xxspltib_constant_p (rtx op,
6899 machine_mode mode,
6900 int *num_insns_ptr,
6901 int *constant_ptr)
6903 size_t nunits = GET_MODE_NUNITS (mode);
6904 size_t i;
6905 HOST_WIDE_INT value;
6906 rtx element;
6908 /* Set the returned values to out of bound values. */
6909 *num_insns_ptr = -1;
6910 *constant_ptr = 256;
6912 if (!TARGET_P9_VECTOR)
6913 return false;
6915 if (mode == VOIDmode)
6916 mode = GET_MODE (op);
6918 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6919 return false;
6921 /* Handle (vec_duplicate <constant>). */
6922 if (GET_CODE (op) == VEC_DUPLICATE)
6924 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6925 && mode != V2DImode)
6926 return false;
6928 element = XEXP (op, 0);
6929 if (!CONST_INT_P (element))
6930 return false;
6932 value = INTVAL (element);
6933 if (!IN_RANGE (value, -128, 127))
6934 return false;
6937 /* Handle (const_vector [...]). */
6938 else if (GET_CODE (op) == CONST_VECTOR)
6940 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6941 && mode != V2DImode)
6942 return false;
6944 element = CONST_VECTOR_ELT (op, 0);
6945 if (!CONST_INT_P (element))
6946 return false;
6948 value = INTVAL (element);
6949 if (!IN_RANGE (value, -128, 127))
6950 return false;
6952 for (i = 1; i < nunits; i++)
6954 element = CONST_VECTOR_ELT (op, i);
6955 if (!CONST_INT_P (element))
6956 return false;
6958 if (value != INTVAL (element))
6959 return false;
6963 /* Handle integer constants being loaded into the upper part of the VSX
6964 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6965 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6966 else if (CONST_INT_P (op))
6968 if (!SCALAR_INT_MODE_P (mode))
6969 return false;
6971 value = INTVAL (op);
6972 if (!IN_RANGE (value, -128, 127))
6973 return false;
6975 if (!IN_RANGE (value, -1, 0))
6977 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6978 return false;
6980 if (EASY_VECTOR_15 (value))
6981 return false;
6985 else
6986 return false;
6988 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6989 sign extend. Special case 0/-1 to allow getting any VSX register instead
6990 of an Altivec register. */
6991 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6992 && EASY_VECTOR_15 (value))
6993 return false;
6995 /* Return # of instructions and the constant byte for XXSPLTIB. */
6996 if (mode == V16QImode)
6997 *num_insns_ptr = 1;
6999 else if (IN_RANGE (value, -1, 0))
7000 *num_insns_ptr = 1;
7002 else
7003 *num_insns_ptr = 2;
7005 *constant_ptr = (int) value;
7006 return true;
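/* For example, splatting 100 across V16QImode needs just XXSPLTIB
   (*NUM_INSNS_PTR is 1), splatting 100 across V4SImode needs XXSPLTIB
   plus a sign-extend such as VEXTSB2W (*NUM_INSNS_PTR is 2), and
   splatting 5 across V4SImode is rejected here because EASY_VECTOR_15
   holds and a single VSPLTISW is preferable.  */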
7009 const char *
7010 output_vec_const_move (rtx *operands)
7012 int cst, cst2, shift;
7013 machine_mode mode;
7014 rtx dest, vec;
7016 dest = operands[0];
7017 vec = operands[1];
7018 mode = GET_MODE (dest);
7020 if (TARGET_VSX)
7022 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
7023 int xxspltib_value = 256;
7024 int num_insns = -1;
7026 if (zero_constant (vec, mode))
7028 if (TARGET_P9_VECTOR)
7029 return "xxspltib %x0,0";
7031 else if (dest_vmx_p)
7032 return "vspltisw %0,0";
7034 else
7035 return "xxlxor %x0,%x0,%x0";
7038 if (all_ones_constant (vec, mode))
7040 if (TARGET_P9_VECTOR)
7041 return "xxspltib %x0,255";
7043 else if (dest_vmx_p)
7044 return "vspltisw %0,-1";
7046 else if (TARGET_P8_VECTOR)
7047 return "xxlorc %x0,%x0,%x0";
7049 else
7050 gcc_unreachable ();
7053 if (TARGET_P9_VECTOR
7054 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
7056 if (num_insns == 1)
7058 operands[2] = GEN_INT (xxspltib_value & 0xff);
7059 return "xxspltib %x0,%2";
7062 return "#";
7066 if (TARGET_ALTIVEC)
7068 rtx splat_vec;
7070 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
7071 if (zero_constant (vec, mode))
7072 return "vspltisw %0,0";
7074 if (all_ones_constant (vec, mode))
7075 return "vspltisw %0,-1";
7077 /* Do we need to construct a value using VSLDOI? */
7078 shift = vspltis_shifted (vec);
7079 if (shift != 0)
7080 return "#";
7082 splat_vec = gen_easy_altivec_constant (vec);
7083 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
7084 operands[1] = XEXP (splat_vec, 0);
7085 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
7086 return "#";
7088 switch (GET_MODE (splat_vec))
7090 case V4SImode:
7091 return "vspltisw %0,%1";
7093 case V8HImode:
7094 return "vspltish %0,%1";
7096 case V16QImode:
7097 return "vspltisb %0,%1";
7099 default:
7100 gcc_unreachable ();
7104 gcc_assert (TARGET_SPE);
7106 /* Vector constant 0 is handled by a splitter for V2SI, and by the move
7107 patterns for V1DI, V4HI, and V2SF.
7109 FIXME: We should probably return # and add post reload
7110 splitters for these, but this way is so easy ;-). */
7111 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
7112 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
7113 operands[1] = CONST_VECTOR_ELT (vec, 0);
7114 operands[2] = CONST_VECTOR_ELT (vec, 1);
7115 if (cst == cst2)
7116 return "li %0,%1\n\tevmergelo %0,%0,%0";
7117 else if (WORDS_BIG_ENDIAN)
7118 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
7119 else
7120 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
7123 /* Initialize TARGET of vector PAIRED to VALS. */
7125 void
7126 paired_expand_vector_init (rtx target, rtx vals)
7128 machine_mode mode = GET_MODE (target);
7129 int n_elts = GET_MODE_NUNITS (mode);
7130 int n_var = 0;
7131 rtx x, new_rtx, tmp, constant_op, op1, op2;
7132 int i;
7134 for (i = 0; i < n_elts; ++i)
7136 x = XVECEXP (vals, 0, i);
7137 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7138 ++n_var;
7140 if (n_var == 0)
7142 /* Load from constant pool. */
7143 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
7144 return;
7147 if (n_var == 2)
7149 /* The vector is initialized only with non-constants. */
7150 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
7151 XVECEXP (vals, 0, 1));
7153 emit_move_insn (target, new_rtx);
7154 return;
7157 /* One field is non-constant and the other one is a constant. Load the
7158 constant from the constant pool and use ps_merge instruction to
7159 construct the whole vector. */
7160 op1 = XVECEXP (vals, 0, 0);
7161 op2 = XVECEXP (vals, 0, 1);
7163 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
7165 tmp = gen_reg_rtx (GET_MODE (constant_op));
7166 emit_move_insn (tmp, constant_op);
7168 if (CONSTANT_P (op1))
7169 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
7170 else
7171 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
7173 emit_move_insn (target, new_rtx);
7176 void
7177 paired_expand_vector_move (rtx operands[])
7179 rtx op0 = operands[0], op1 = operands[1];
7181 emit_move_insn (op0, op1);
7184 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
7185 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
7186 operands for the relation operation COND. This is a recursive
7187 function. */
7189 static void
7190 paired_emit_vector_compare (enum rtx_code rcode,
7191 rtx dest, rtx op0, rtx op1,
7192 rtx cc_op0, rtx cc_op1)
7194 rtx tmp = gen_reg_rtx (V2SFmode);
7195 rtx tmp1, max, min;
7197 gcc_assert (TARGET_PAIRED_FLOAT);
7198 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
7200 switch (rcode)
7202 case LT:
7203 case LTU:
7204 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
7205 return;
7206 case GE:
7207 case GEU:
7208 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
7209 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
7210 return;
7211 case LE:
7212 case LEU:
7213 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
7214 return;
7215 case GT:
7216 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
7217 return;
7218 case EQ:
7219 tmp1 = gen_reg_rtx (V2SFmode);
7220 max = gen_reg_rtx (V2SFmode);
7221 min = gen_reg_rtx (V2SFmode);
7224 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
7225 emit_insn (gen_selv2sf4
7226 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
7227 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
7228 emit_insn (gen_selv2sf4
7229 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
7230 emit_insn (gen_subv2sf3 (tmp1, min, max));
7231 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
7232 return;
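/* A note on the EQ sequence above, assuming gen_selv2sf4 selects its
   first value operand wherever the comparison input is non-negative:
   MAX and MIN compute the per-element maximum and minimum of CC_OP0
   and CC_OP1, so MIN - MAX is zero exactly where they are equal and
   negative elsewhere, which makes the final select pick OP0 only on
   equality.  */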
7233 case NE:
7234 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
7235 return;
7236 case UNLE:
7237 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
7238 return;
7239 case UNLT:
7240 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
7241 return;
7242 case UNGE:
7243 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
7244 return;
7245 case UNGT:
7246 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
7247 return;
7248 default:
7249 gcc_unreachable ();
7252 return;
7255 /* Emit vector conditional expression.
7256 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
7257 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
7259 int
7260 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
7261 rtx cond, rtx cc_op0, rtx cc_op1)
7263 enum rtx_code rcode = GET_CODE (cond);
7265 if (!TARGET_PAIRED_FLOAT)
7266 return 0;
7268 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
7270 return 1;
7273 /* Initialize vector TARGET to VALS. */
7275 void
7276 rs6000_expand_vector_init (rtx target, rtx vals)
7278 machine_mode mode = GET_MODE (target);
7279 machine_mode inner_mode = GET_MODE_INNER (mode);
7280 int n_elts = GET_MODE_NUNITS (mode);
7281 int n_var = 0, one_var = -1;
7282 bool all_same = true, all_const_zero = true;
7283 rtx x, mem;
7284 int i;
7286 for (i = 0; i < n_elts; ++i)
7288 x = XVECEXP (vals, 0, i);
7289 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7290 ++n_var, one_var = i;
7291 else if (x != CONST0_RTX (inner_mode))
7292 all_const_zero = false;
7294 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
7295 all_same = false;
7298 if (n_var == 0)
7300 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
7301 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
7302 if ((int_vector_p || TARGET_VSX) && all_const_zero)
7304 /* Zero register. */
7305 emit_move_insn (target, CONST0_RTX (mode));
7306 return;
7308 else if (int_vector_p && easy_vector_constant (const_vec, mode))
7310 /* Splat immediate. */
7311 emit_insn (gen_rtx_SET (target, const_vec));
7312 return;
7314 else
7316 /* Load from constant pool. */
7317 emit_move_insn (target, const_vec);
7318 return;
7322 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7323 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7325 rtx op[2];
7326 size_t i;
7327 size_t num_elements = all_same ? 1 : 2;
7328 for (i = 0; i < num_elements; i++)
7330 op[i] = XVECEXP (vals, 0, i);
7331 /* Just in case there is a SUBREG with a smaller mode, do a
7332 conversion. */
7333 if (GET_MODE (op[i]) != inner_mode)
7335 rtx tmp = gen_reg_rtx (inner_mode);
7336 convert_move (tmp, op[i], 0);
7337 op[i] = tmp;
7339 /* Allow load with splat double word. */
7340 else if (MEM_P (op[i]))
7342 if (!all_same)
7343 op[i] = force_reg (inner_mode, op[i]);
7345 else if (!REG_P (op[i]))
7346 op[i] = force_reg (inner_mode, op[i]);
7349 if (all_same)
7351 if (mode == V2DFmode)
7352 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7353 else
7354 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7356 else
7358 if (mode == V2DFmode)
7359 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7360 else
7361 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7363 return;
7366 /* Special case initializing vector int if we are on 64-bit systems with
7367 direct move or we have the ISA 3.0 instructions. */
7368 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7369 && TARGET_DIRECT_MOVE_64BIT)
7371 if (all_same)
7373 rtx element0 = XVECEXP (vals, 0, 0);
7374 if (MEM_P (element0))
7375 element0 = rs6000_address_for_fpconvert (element0);
7376 else
7377 element0 = force_reg (SImode, element0);
7379 if (TARGET_P9_VECTOR)
7380 emit_insn (gen_vsx_splat_v4si (target, element0));
7381 else
7383 rtx tmp = gen_reg_rtx (DImode);
7384 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7385 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7387 return;
7389 else
7391 rtx elements[4];
7392 size_t i;
7394 for (i = 0; i < 4; i++)
7396 elements[i] = XVECEXP (vals, 0, i);
7397 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7398 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7401 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7402 elements[2], elements[3]));
7403 return;
7407 /* With single-precision floating point on VSX, we know that internally
7408 single precision is actually represented as a double. Either make two
7409 V2DF vectors and convert them to single precision, or do one conversion
7410 and splat the result to the other elements. */
7411 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7413 if (all_same)
7415 rtx element0 = XVECEXP (vals, 0, 0);
7417 if (TARGET_P9_VECTOR)
7419 if (MEM_P (element0))
7420 element0 = rs6000_address_for_fpconvert (element0);
7422 emit_insn (gen_vsx_splat_v4sf (target, element0));
7425 else
7427 rtx freg = gen_reg_rtx (V4SFmode);
7428 rtx sreg = force_reg (SFmode, element0);
7429 rtx cvt = (TARGET_XSCVDPSPN
7430 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7431 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7433 emit_insn (cvt);
7434 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7435 const0_rtx));
7438 else
7440 rtx dbl_even = gen_reg_rtx (V2DFmode);
7441 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7442 rtx flt_even = gen_reg_rtx (V4SFmode);
7443 rtx flt_odd = gen_reg_rtx (V4SFmode);
7444 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7445 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7446 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7447 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7449 /* Use VMRGEW if we can instead of doing a permute. */
7450 if (TARGET_P8_VECTOR)
7452 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7453 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7454 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7455 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7456 if (BYTES_BIG_ENDIAN)
7457 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7458 else
7459 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7461 else
7463 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7464 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7465 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7466 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7467 rs6000_expand_extract_even (target, flt_even, flt_odd);
7470 return;
7473 /* Special case initializing vector short/char that are splats if we are on
7474 64-bit systems with direct move. */
7475 if (all_same && TARGET_DIRECT_MOVE_64BIT
7476 && (mode == V16QImode || mode == V8HImode))
7478 rtx op0 = XVECEXP (vals, 0, 0);
7479 rtx di_tmp = gen_reg_rtx (DImode);
7481 if (!REG_P (op0))
7482 op0 = force_reg (GET_MODE_INNER (mode), op0);
7484 if (mode == V16QImode)
7486 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7487 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7488 return;
7491 if (mode == V8HImode)
7493 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7494 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7495 return;
7499 /* Store value to stack temp. Load vector element. Splat. However, splat
7500 of 64-bit items is not supported on Altivec. */
7501 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7503 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7504 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7505 XVECEXP (vals, 0, 0));
7506 x = gen_rtx_UNSPEC (VOIDmode,
7507 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7508 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7509 gen_rtvec (2,
7510 gen_rtx_SET (target, mem),
7511 x)));
7512 x = gen_rtx_VEC_SELECT (inner_mode, target,
7513 gen_rtx_PARALLEL (VOIDmode,
7514 gen_rtvec (1, const0_rtx)));
7515 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7516 return;
7519 /* One field is non-constant. Load constant then overwrite
7520 varying field. */
7521 if (n_var == 1)
7523 rtx copy = copy_rtx (vals);
7525 /* Load constant part of vector, substitute neighboring value for
7526 varying element. */
7527 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7528 rs6000_expand_vector_init (target, copy);
7530 /* Insert variable. */
7531 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7532 return;
7535 /* Construct the vector in memory one field at a time
7536 and load the whole vector. */
7537 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7538 for (i = 0; i < n_elts; i++)
7539 emit_move_insn (adjust_address_nv (mem, inner_mode,
7540 i * GET_MODE_SIZE (inner_mode)),
7541 XVECEXP (vals, 0, i));
7542 emit_move_insn (target, mem);
7545 /* Set field ELT of TARGET to VAL. */
7547 void
7548 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7550 machine_mode mode = GET_MODE (target);
7551 machine_mode inner_mode = GET_MODE_INNER (mode);
7552 rtx reg = gen_reg_rtx (mode);
7553 rtx mask, mem, x;
7554 int width = GET_MODE_SIZE (inner_mode);
7555 int i;
7557 val = force_reg (GET_MODE (val), val);
7559 if (VECTOR_MEM_VSX_P (mode))
7561 rtx insn = NULL_RTX;
7562 rtx elt_rtx = GEN_INT (elt);
7564 if (mode == V2DFmode)
7565 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7567 else if (mode == V2DImode)
7568 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7570 else if (TARGET_P9_VECTOR && TARGET_VSX_SMALL_INTEGER
7571 && TARGET_UPPER_REGS_DI && TARGET_POWERPC64)
7573 if (mode == V4SImode)
7574 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7575 else if (mode == V8HImode)
7576 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7577 else if (mode == V16QImode)
7578 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7581 if (insn)
7583 emit_insn (insn);
7584 return;
7588 /* Simplify setting single element vectors like V1TImode. */
7589 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7591 emit_move_insn (target, gen_lowpart (mode, val));
7592 return;
7595 /* Load single variable value. */
7596 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7597 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7598 x = gen_rtx_UNSPEC (VOIDmode,
7599 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7600 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7601 gen_rtvec (2,
7602 gen_rtx_SET (reg, mem),
7603 x)));
7605 /* Linear sequence. */
7606 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7607 for (i = 0; i < 16; ++i)
7608 XVECEXP (mask, 0, i) = GEN_INT (i);
7610 /* Set permute mask to insert element into target. */
7611 for (i = 0; i < width; ++i)
7612 XVECEXP (mask, 0, elt*width + i)
7613 = GEN_INT (i + 0x10);
7614 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
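/* For example, inserting into element 1 of a V4SImode vector builds
   the selector { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }:
   bytes 4-7 (values 0x10-0x13) take the new value from the second
   permute input, while the remaining bytes copy the old target.  */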
7616 if (BYTES_BIG_ENDIAN)
7617 x = gen_rtx_UNSPEC (mode,
7618 gen_rtvec (3, target, reg,
7619 force_reg (V16QImode, x)),
7620 UNSPEC_VPERM);
7621 else
7623 if (TARGET_P9_VECTOR)
7624 x = gen_rtx_UNSPEC (mode,
7625 gen_rtvec (3, target, reg,
7626 force_reg (V16QImode, x)),
7627 UNSPEC_VPERMR);
7628 else
7630 /* Invert selector. We prefer to generate VNAND on P8 so
7631 that future fusion opportunities can kick in, but must
7632 generate VNOR elsewhere. */
7633 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7634 rtx iorx = (TARGET_P8_VECTOR
7635 ? gen_rtx_IOR (V16QImode, notx, notx)
7636 : gen_rtx_AND (V16QImode, notx, notx));
7637 rtx tmp = gen_reg_rtx (V16QImode);
7638 emit_insn (gen_rtx_SET (tmp, iorx));
7640 /* Permute with operands reversed and adjusted selector. */
7641 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7642 UNSPEC_VPERM);
7646 emit_insn (gen_rtx_SET (target, x));
7649 /* Extract field ELT from VEC into TARGET. */
7651 void
7652 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7654 machine_mode mode = GET_MODE (vec);
7655 machine_mode inner_mode = GET_MODE_INNER (mode);
7656 rtx mem;
7658 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7660 switch (mode)
7662 default:
7663 break;
7664 case V1TImode:
7665 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7666 emit_move_insn (target, gen_lowpart (TImode, vec));
7667 break;
7668 case V2DFmode:
7669 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7670 return;
7671 case V2DImode:
7672 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7673 return;
7674 case V4SFmode:
7675 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7676 return;
7677 case V16QImode:
7678 if (TARGET_DIRECT_MOVE_64BIT)
7680 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7681 return;
7683 else
7684 break;
7685 case V8HImode:
7686 if (TARGET_DIRECT_MOVE_64BIT)
7688 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7689 return;
7691 else
7692 break;
7693 case V4SImode:
7694 if (TARGET_DIRECT_MOVE_64BIT)
7696 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7697 return;
7699 break;
7702 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7703 && TARGET_DIRECT_MOVE_64BIT)
7705 if (GET_MODE (elt) != DImode)
7707 rtx tmp = gen_reg_rtx (DImode);
7708 convert_move (tmp, elt, 0);
7709 elt = tmp;
7711 else if (!REG_P (elt))
7712 elt = force_reg (DImode, elt);
7714 switch (mode)
7716 case V2DFmode:
7717 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7718 return;
7720 case V2DImode:
7721 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7722 return;
7724 case V4SFmode:
7725 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7726 return;
7728 case V4SImode:
7729 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7730 return;
7732 case V8HImode:
7733 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7734 return;
7736 case V16QImode:
7737 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7738 return;
7740 default:
7741 gcc_unreachable ();
7745 gcc_assert (CONST_INT_P (elt));
7747 /* Allocate mode-sized buffer. */
7748 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7750 emit_move_insn (mem, vec);
7752 /* Add offset to field within buffer matching vector element. */
7753 mem = adjust_address_nv (mem, inner_mode,
7754 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7756 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7759 /* Helper function to return the register number of an RTX. */
7760 static inline int
7761 regno_or_subregno (rtx op)
7763 if (REG_P (op))
7764 return REGNO (op);
7765 else if (SUBREG_P (op))
7766 return subreg_regno (op);
7767 else
7768 gcc_unreachable ();
7771 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7772 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7773 temporary (BASE_TMP) to fixup the address. Return the new memory address
7774 that is valid for reads or writes to a given register (SCALAR_REG). */
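/* For example, given a V4SImode MEM at address (plus r3 16), constant
   ELEMENT 2 and SImode, the element offset folds into the displacement
   and the result is (mem:SI (plus r3 24)).  */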
7776 rtx
7777 rs6000_adjust_vec_address (rtx scalar_reg,
7778 rtx mem,
7779 rtx element,
7780 rtx base_tmp,
7781 machine_mode scalar_mode)
7783 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7784 rtx addr = XEXP (mem, 0);
7785 rtx element_offset;
7786 rtx new_addr;
7787 bool valid_addr_p;
7789 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7790 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7792 /* Calculate what we need to add to the address to get the element
7793 address. */
7794 if (CONST_INT_P (element))
7795 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7796 else
7798 int byte_shift = exact_log2 (scalar_size);
7799 gcc_assert (byte_shift >= 0);
7801 if (byte_shift == 0)
7802 element_offset = element;
7804 else
7806 if (TARGET_POWERPC64)
7807 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7808 else
7809 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7811 element_offset = base_tmp;
7815 /* Create the new address pointing to the element within the vector. If we
7816 are adding 0, we don't have to change the address. */
7817 if (element_offset == const0_rtx)
7818 new_addr = addr;
7820 /* A simple indirect address can be converted into a reg + offset
7821 address. */
7822 else if (REG_P (addr) || SUBREG_P (addr))
7823 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7825 /* Optimize D-FORM addresses with constant offset with a constant element, to
7826 include the element offset in the address directly. */
7827 else if (GET_CODE (addr) == PLUS)
7829 rtx op0 = XEXP (addr, 0);
7830 rtx op1 = XEXP (addr, 1);
7831 rtx insn;
7833 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7834 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7836 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7837 rtx offset_rtx = GEN_INT (offset);
7839 if (IN_RANGE (offset, -32768, 32767)
7840 && (scalar_size < 8 || (offset & 0x3) == 0))
7841 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7842 else
7844 emit_move_insn (base_tmp, offset_rtx);
7845 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7848 else
7850 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7851 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7853 /* Note, ADDI requires the register being added to be a base
7854 register. If the register was R0, load it up into the temporary
7855 and do the add. */
7856 if (op1_reg_p
7857 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7859 insn = gen_add3_insn (base_tmp, op1, element_offset);
7860 gcc_assert (insn != NULL_RTX);
7861 emit_insn (insn);
7864 else if (ele_reg_p
7865 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7867 insn = gen_add3_insn (base_tmp, element_offset, op1);
7868 gcc_assert (insn != NULL_RTX);
7869 emit_insn (insn);
7872 else
7874 emit_move_insn (base_tmp, op1);
7875 emit_insn (gen_add2_insn (base_tmp, element_offset));
7878 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7882 else
7884 emit_move_insn (base_tmp, addr);
7885 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7888 /* If we have a PLUS, we need to see whether the particular register class
7889 allows for D-FORM or X-FORM addressing. */
7890 if (GET_CODE (new_addr) == PLUS)
7892 rtx op1 = XEXP (new_addr, 1);
7893 addr_mask_type addr_mask;
7894 int scalar_regno = regno_or_subregno (scalar_reg);
7896 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7897 if (INT_REGNO_P (scalar_regno))
7898 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7900 else if (FP_REGNO_P (scalar_regno))
7901 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7903 else if (ALTIVEC_REGNO_P (scalar_regno))
7904 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7906 else
7907 gcc_unreachable ();
7909 if (REG_P (op1) || SUBREG_P (op1))
7910 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7911 else
7912 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7915 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7916 valid_addr_p = true;
7918 else
7919 valid_addr_p = false;
7921 if (!valid_addr_p)
7923 emit_move_insn (base_tmp, new_addr);
7924 new_addr = base_tmp;
7927 return change_address (mem, scalar_mode, new_addr);
7930 /* Split a variable vec_extract operation into the component instructions. */
7932 void
7933 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7934 rtx tmp_altivec)
7936 machine_mode mode = GET_MODE (src);
7937 machine_mode scalar_mode = GET_MODE (dest);
7938 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7939 int byte_shift = exact_log2 (scalar_size);
7941 gcc_assert (byte_shift >= 0);
7943 /* If we are given a memory address, optimize to load just the element. We
7944 don't have to adjust the vector element number on little endian
7945 systems. */
7946 if (MEM_P (src))
7948 gcc_assert (REG_P (tmp_gpr));
7949 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7950 tmp_gpr, scalar_mode));
7951 return;
7954 else if (REG_P (src) || SUBREG_P (src))
7956 int bit_shift = byte_shift + 3;
7957 rtx element2;
7958 int dest_regno = regno_or_subregno (dest);
7959 int src_regno = regno_or_subregno (src);
7960 int element_regno = regno_or_subregno (element);
7962 gcc_assert (REG_P (tmp_gpr));
7964 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7965 a general purpose register. */
7966 if (TARGET_P9_VECTOR
7967 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7968 && INT_REGNO_P (dest_regno)
7969 && ALTIVEC_REGNO_P (src_regno)
7970 && INT_REGNO_P (element_regno))
7972 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7973 rtx element_si = gen_rtx_REG (SImode, element_regno);
7975 if (mode == V16QImode)
7976 emit_insn (VECTOR_ELT_ORDER_BIG
7977 ? gen_vextublx (dest_si, element_si, src)
7978 : gen_vextubrx (dest_si, element_si, src));
7980 else if (mode == V8HImode)
7982 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7983 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7984 emit_insn (VECTOR_ELT_ORDER_BIG
7985 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7986 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7990 else
7992 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7993 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7994 emit_insn (VECTOR_ELT_ORDER_BIG
7995 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7996 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7999 return;
8003 gcc_assert (REG_P (tmp_altivec));
8005 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
8006 an XOR, otherwise we need to subtract. The shift amount is chosen so
8007 that VSLO will shift the element into the upper position (adding 3
8008 converts a byte shift into a bit shift). */
8009 if (scalar_size == 8)
8011 if (!VECTOR_ELT_ORDER_BIG)
8013 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
8014 element2 = tmp_gpr;
8016 else
8017 element2 = element;
8019 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
8020 bit. */
8021 emit_insn (gen_rtx_SET (tmp_gpr,
8022 gen_rtx_AND (DImode,
8023 gen_rtx_ASHIFT (DImode,
8024 element2,
8025 GEN_INT (6)),
8026 GEN_INT (64))));
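/* TMP_GPR now holds the bit offset of the selected doubleword (0 or
   64): (ELEMENT2 << 6) & 64 keeps only the low bit of the element
   number, scaled by 64.  */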
8028 else
8030 if (!VECTOR_ELT_ORDER_BIG)
8032 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
8034 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
8035 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
8036 element2 = tmp_gpr;
8038 else
8039 element2 = element;
8041 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
8044 /* Get the value into the lower byte of the Altivec register where VSLO
8045 expects it. */
8046 if (TARGET_P9_VECTOR)
8047 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
8048 else if (can_create_pseudo_p ())
8049 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
8050 else
8052 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8053 emit_move_insn (tmp_di, tmp_gpr);
8054 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
8057 /* Do the VSLO to get the value into the final location. */
8058 switch (mode)
8060 case V2DFmode:
8061 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
8062 return;
8064 case V2DImode:
8065 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
8066 return;
8068 case V4SFmode:
8070 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8071 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
8072 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
8073 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
8074 tmp_altivec));
8076 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
8077 return;
8080 case V4SImode:
8081 case V8HImode:
8082 case V16QImode:
8084 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8085 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
8086 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
8087 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
8088 tmp_altivec));
8089 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
8090 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
8091 GEN_INT (64 - (8 * scalar_size))));
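/* The arithmetic shift right moves the extracted element from the top
   of the doubleword down to the low-order bits, sign extending it on
   the way.  */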
8092 return;
8095 default:
8096 gcc_unreachable ();
8099 return;
8101 else
8102 gcc_unreachable ();
8105 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
8106 two SImode values. */
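/* For example, with constant inputs SI1 = 1 and SI2 = 2, this emits a
   single move of 0x0000000100000002 into DEST.  */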
8108 static void
8109 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
8111 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
8113 if (CONST_INT_P (si1) && CONST_INT_P (si2))
8115 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
8116 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
8118 emit_move_insn (dest, GEN_INT (const1 | const2));
8119 return;
8122 /* Put si1 into upper 32-bits of dest. */
8123 if (CONST_INT_P (si1))
8124 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
8125 else
8127 /* Generate RLDIC. */
8128 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
8129 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
8130 rtx mask_rtx = GEN_INT (mask_32bit << 32);
8131 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
8132 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
8133 emit_insn (gen_rtx_SET (dest, and_rtx));
8136 /* Put si2 into the temporary. */
8137 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
8138 if (CONST_INT_P (si2))
8139 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
8140 else
8141 emit_insn (gen_zero_extendsidi2 (tmp, si2));
8143 /* Combine the two parts. */
8144 emit_insn (gen_iordi3 (dest, dest, tmp));
8145 return;
8148 /* Split a V4SI initialization. */
8150 void
8151 rs6000_split_v4si_init (rtx operands[])
8153 rtx dest = operands[0];
8155 /* Destination is a GPR, build up the two DImode parts in place. */
8156 if (REG_P (dest) || SUBREG_P (dest))
8158 int d_regno = regno_or_subregno (dest);
8159 rtx scalar1 = operands[1];
8160 rtx scalar2 = operands[2];
8161 rtx scalar3 = operands[3];
8162 rtx scalar4 = operands[4];
8163 rtx tmp1 = operands[5];
8164 rtx tmp2 = operands[6];
8166 /* Even though we only need one temporary (plus the destination, which
8167 has an early clobber constraint), try to use two temporaries, one for
8168 each double word created. That way the 2nd insn scheduling pass can
8169 rearrange things so the two parts are done in parallel. */
8170 if (BYTES_BIG_ENDIAN)
8172 rtx di_lo = gen_rtx_REG (DImode, d_regno);
8173 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
8174 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
8175 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
8177 else
8179 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
8180 rtx di_hi = gen_rtx_REG (DImode, d_regno);
8181 gcc_assert (!VECTOR_ELT_ORDER_BIG);
8182 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
8183 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
8185 return;
8188 else
8189 gcc_unreachable ();
8192 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
8194 bool
8195 invalid_e500_subreg (rtx op, machine_mode mode)
8197 if (TARGET_E500_DOUBLE)
8199 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
8200 subreg:TI and reg:TF. Decimal float modes are like integer
8201 modes (only low part of each register used) for this
8202 purpose. */
8203 if (GET_CODE (op) == SUBREG
8204 && (mode == SImode || mode == DImode || mode == TImode
8205 || mode == DDmode || mode == TDmode || mode == PTImode)
8206 && REG_P (SUBREG_REG (op))
8207 && (GET_MODE (SUBREG_REG (op)) == DFmode
8208 || GET_MODE (SUBREG_REG (op)) == TFmode
8209 || GET_MODE (SUBREG_REG (op)) == IFmode
8210 || GET_MODE (SUBREG_REG (op)) == KFmode))
8211 return true;
8213 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
8214 reg:TI. */
8215 if (GET_CODE (op) == SUBREG
8216 && (mode == DFmode || mode == TFmode || mode == IFmode
8217 || mode == KFmode)
8218 && REG_P (SUBREG_REG (op))
8219 && (GET_MODE (SUBREG_REG (op)) == DImode
8220 || GET_MODE (SUBREG_REG (op)) == TImode
8221 || GET_MODE (SUBREG_REG (op)) == PTImode
8222 || GET_MODE (SUBREG_REG (op)) == DDmode
8223 || GET_MODE (SUBREG_REG (op)) == TDmode))
8224 return true;
8227 if (TARGET_SPE
8228 && GET_CODE (op) == SUBREG
8229 && mode == SImode
8230 && REG_P (SUBREG_REG (op))
8231 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
8232 return true;
8234 return false;
8237 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
8238 selects whether the alignment is ABI-mandated, optional, or
8239 both ABI-mandated and optional alignment. */
8241 unsigned int
8242 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
8244 if (how != align_opt)
8246 if (TREE_CODE (type) == VECTOR_TYPE)
8248 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
8249 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
8251 if (align < 64)
8252 align = 64;
8254 else if (align < 128)
8255 align = 128;
8257 else if (TARGET_E500_DOUBLE
8258 && TREE_CODE (type) == REAL_TYPE
8259 && TYPE_MODE (type) == DFmode)
8261 if (align < 64)
8262 align = 64;
8266 if (how != align_abi)
8268 if (TREE_CODE (type) == ARRAY_TYPE
8269 && TYPE_MODE (TREE_TYPE (type)) == QImode)
8271 if (align < BITS_PER_WORD)
8272 align = BITS_PER_WORD;
8276 return align;
8279 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
8281 bool
8282 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
8284 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
8286 if (computed != 128)
8288 static bool warned;
8289 if (!warned && warn_psabi)
8291 warned = true;
8292 inform (input_location,
8293 "the layout of aggregates containing vectors with"
8294 " %d-byte alignment has changed in GCC 5",
8295 computed / BITS_PER_UNIT);
8298 /* In current GCC there is no special case. */
8299 return false;
8302 return false;
8305 /* AIX increases natural record alignment to doubleword if the first
8306 field is an FP double while the FP fields remain word aligned. */
8308 unsigned int
8309 rs6000_special_round_type_align (tree type, unsigned int computed,
8310 unsigned int specified)
8312 unsigned int align = MAX (computed, specified);
8313 tree field = TYPE_FIELDS (type);
8315 /* Skip all non-field decls. */
8316 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8317 field = DECL_CHAIN (field);
8319 if (field != NULL && field != type)
8321 type = TREE_TYPE (field);
8322 while (TREE_CODE (type) == ARRAY_TYPE)
8323 type = TREE_TYPE (type);
8325 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
8326 align = MAX (align, 64);
8329 return align;
8332 /* Darwin increases record alignment to the natural alignment of
8333 the first field. */
8335 unsigned int
8336 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
8337 unsigned int specified)
8339 unsigned int align = MAX (computed, specified);
8341 if (TYPE_PACKED (type))
8342 return align;
8344 /* Find the first field, looking down into aggregates. */
8345 do {
8346 tree field = TYPE_FIELDS (type);
8347 /* Skip all non-field decls. */
8348 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8349 field = DECL_CHAIN (field);
8350 if (! field)
8351 break;
8352 /* A packed field does not contribute any extra alignment. */
8353 if (DECL_PACKED (field))
8354 return align;
8355 type = TREE_TYPE (field);
8356 while (TREE_CODE (type) == ARRAY_TYPE)
8357 type = TREE_TYPE (type);
8358 } while (AGGREGATE_TYPE_P (type));
8360 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8361 align = MAX (align, TYPE_ALIGN (type));
8363 return align;
8366 /* Return 1 for an operand in small memory on V.4/eabi. */
8368 int
8369 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8370 machine_mode mode ATTRIBUTE_UNUSED)
8372 #if TARGET_ELF
8373 rtx sym_ref;
8375 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8376 return 0;
8378 if (DEFAULT_ABI != ABI_V4)
8379 return 0;
8381 /* Vector and float memory instructions have a limited offset on the
8382 SPE, so using a vector or float variable directly as an operand is
8383 not useful. */
8384 if (TARGET_SPE
8385 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
8386 return 0;
8388 if (GET_CODE (op) == SYMBOL_REF)
8389 sym_ref = op;
8391 else if (GET_CODE (op) != CONST
8392 || GET_CODE (XEXP (op, 0)) != PLUS
8393 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8394 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8395 return 0;
8397 else
8399 rtx sum = XEXP (op, 0);
8400 HOST_WIDE_INT summand;
8402 /* We have to be careful here, because it is the referenced address
8403 that must be 32k from _SDA_BASE_, not just the symbol. */
8404 summand = INTVAL (XEXP (sum, 1));
8405 if (summand < 0 || summand > g_switch_value)
8406 return 0;
8408 sym_ref = XEXP (sum, 0);
8411 return SYMBOL_REF_SMALL_P (sym_ref);
8412 #else
8413 return 0;
8414 #endif
8417 /* Return true if either operand is a general purpose register. */
8419 bool
8420 gpr_or_gpr_p (rtx op0, rtx op1)
8422 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8423 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8426 /* Return true if this is a move direct operation between GPR registers and
8427 floating point/VSX registers. */
8429 bool
8430 direct_move_p (rtx op0, rtx op1)
8432 int regno0, regno1;
8434 if (!REG_P (op0) || !REG_P (op1))
8435 return false;
8437 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8438 return false;
8440 regno0 = REGNO (op0);
8441 regno1 = REGNO (op1);
8442 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8443 return false;
8445 if (INT_REGNO_P (regno0))
8446 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8448 else if (INT_REGNO_P (regno1))
8450 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8451 return true;
8453 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8454 return true;
8457 return false;
8460 /* Return true if the OFFSET is valid for the quad address instructions that
8461 use d-form (register + offset) addressing. */
8463 static inline bool
8464 quad_address_offset_p (HOST_WIDE_INT offset)
8466 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
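/* For example, offset 32752 is accepted (in range and 16-byte
   aligned), while 32760 is rejected because it is not a multiple of
   16.  */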
8469 /* Return true if the ADDR is an acceptable address for a quad memory
8470 operation of mode MODE (either LQ/STQ for general purpose registers, or
8471 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
8472 address must satisfy strict RTL checking, i.e. only hard registers
8473 are acceptable as base registers. */
8475 bool
8476 quad_address_p (rtx addr, machine_mode mode, bool strict)
8478 rtx op0, op1;
8480 if (GET_MODE_SIZE (mode) != 16)
8481 return false;
8483 if (legitimate_indirect_address_p (addr, strict))
8484 return true;
8486 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8487 return false;
8489 if (GET_CODE (addr) != PLUS)
8490 return false;
8492 op0 = XEXP (addr, 0);
8493 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8494 return false;
8496 op1 = XEXP (addr, 1);
8497 if (!CONST_INT_P (op1))
8498 return false;
8500 return quad_address_offset_p (INTVAL (op1));
8503 /* Return true if this is a load or store quad operation. This function does
8504 not handle the atomic quad memory instructions. */
8506 bool
8507 quad_load_store_p (rtx op0, rtx op1)
8509 bool ret;
8511 if (!TARGET_QUAD_MEMORY)
8512 ret = false;
8514 else if (REG_P (op0) && MEM_P (op1))
8515 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8516 && quad_memory_operand (op1, GET_MODE (op1))
8517 && !reg_overlap_mentioned_p (op0, op1));
8519 else if (MEM_P (op0) && REG_P (op1))
8520 ret = (quad_memory_operand (op0, GET_MODE (op0))
8521 && quad_int_reg_operand (op1, GET_MODE (op1)));
8523 else
8524 ret = false;
8526 if (TARGET_DEBUG_ADDR)
8528 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8529 ret ? "true" : "false");
8530 debug_rtx (gen_rtx_SET (op0, op1));
8533 return ret;
8536 /* Given an address, return a constant offset term if one exists. */
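/* For example, (plus (reg) (const_int 16)) yields (const_int 16);
   a PRE_MODIFY or LO_SUM address is first stripped down to its second
   operand.  */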
8538 static rtx
8539 address_offset (rtx op)
8541 if (GET_CODE (op) == PRE_INC
8542 || GET_CODE (op) == PRE_DEC)
8543 op = XEXP (op, 0);
8544 else if (GET_CODE (op) == PRE_MODIFY
8545 || GET_CODE (op) == LO_SUM)
8546 op = XEXP (op, 1);
8548 if (GET_CODE (op) == CONST)
8549 op = XEXP (op, 0);
8551 if (GET_CODE (op) == PLUS)
8552 op = XEXP (op, 1);
8554 if (CONST_INT_P (op))
8555 return op;
8557 return NULL_RTX;
8560 /* Return true if the MEM operand is a memory operand suitable for use
8561 with a (full width, possibly multiple) gpr load/store. On
8562 powerpc64 this means the offset must be divisible by 4.
8563 Implements 'Y' constraint.
8565 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8566 a constraint function we know the operand has satisfied a suitable
8567 memory predicate. Also accept some odd rtl generated by reload
8568 (see rs6000_legitimize_reload_address for various forms). It is
8569 important that reload rtl be accepted by appropriate constraints
8570 but not by the operand predicate.
8572 Offsetting a lo_sum should not be allowed, except where we know by
8573 alignment that a 32k boundary is not crossed, but see the ???
8574 comment in rs6000_legitimize_reload_address. Note that by
8575 "offsetting" here we mean a further offset to access parts of the
8576 MEM. It's fine to have a lo_sum where the inner address is offset
8577 from a sym, since the same sym+offset will appear in the high part
8578 of the address calculation. */
8580 bool
8581 mem_operand_gpr (rtx op, machine_mode mode)
8583 unsigned HOST_WIDE_INT offset;
8584 int extra;
8585 rtx addr = XEXP (op, 0);
8587 op = address_offset (addr);
8588 if (op == NULL_RTX)
8589 return true;
8591 offset = INTVAL (op);
8592 if (TARGET_POWERPC64 && (offset & 3) != 0)
8593 return false;
8595 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8596 if (extra < 0)
8597 extra = 0;
8599 if (GET_CODE (addr) == LO_SUM)
8600 /* For lo_sum addresses, we must allow any offset except one that
8601 causes a wrap, so test only the low 16 bits. */
8602 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
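/* The unsigned comparison below tests -0x8000 <= offset
   && offset < 0x8000 - extra in a single compare, so the whole
   (possibly multi-word) access stays within a signed 16-bit
   displacement.  */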
8604 return offset + 0x8000 < 0x10000u - extra;
8607 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8608 enforce an offset divisible by 4 even for 32-bit. */
8610 bool
8611 mem_operand_ds_form (rtx op, machine_mode mode)
8613 unsigned HOST_WIDE_INT offset;
8614 int extra;
8615 rtx addr = XEXP (op, 0);
8617 if (!offsettable_address_p (false, mode, addr))
8618 return false;
8620 op = address_offset (addr);
8621 if (op == NULL_RTX)
8622 return true;
8624 offset = INTVAL (op);
8625 if ((offset & 3) != 0)
8626 return false;
8628 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8629 if (extra < 0)
8630 extra = 0;
8632 if (GET_CODE (addr) == LO_SUM)
8633 /* For lo_sum addresses, we must allow any offset except one that
8634 causes a wrap, so test only the low 16 bits. */
8635 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8637 return offset + 0x8000 < 0x10000u - extra;
8640 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8642 static bool
8643 reg_offset_addressing_ok_p (machine_mode mode)
8645 switch (mode)
8647 case V16QImode:
8648 case V8HImode:
8649 case V4SFmode:
8650 case V4SImode:
8651 case V2DFmode:
8652 case V2DImode:
8653 case V1TImode:
8654 case TImode:
8655 case TFmode:
8656 case KFmode:
8657 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8658 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8659 a vector mode, if we want to use the VSX registers to move it around,
8660 we need to restrict ourselves to reg+reg addressing. Similarly for
8661 IEEE 128-bit floating point that is passed in a single vector
8662 register. */
8663 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8664 return mode_supports_vsx_dform_quad (mode);
8665 break;
8667 case V4HImode:
8668 case V2SImode:
8669 case V1DImode:
8670 case V2SFmode:
8671 /* Paired vector modes. Only reg+reg addressing is valid. */
8672 if (TARGET_PAIRED_FLOAT)
8673 return false;
8674 break;
8676 case SDmode:
8677 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8678 addressing for the LFIWZX and STFIWX instructions. */
8679 if (TARGET_NO_SDMODE_STACK)
8680 return false;
8681 break;
8683 default:
8684 break;
8687 return true;
8690 static bool
8691 virtual_stack_registers_memory_p (rtx op)
8693 int regnum;
8695 if (GET_CODE (op) == REG)
8696 regnum = REGNO (op);
8698 else if (GET_CODE (op) == PLUS
8699 && GET_CODE (XEXP (op, 0)) == REG
8700 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8701 regnum = REGNO (XEXP (op, 0));
8703 else
8704 return false;
8706 return (regnum >= FIRST_VIRTUAL_REGISTER
8707 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8710 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8711 is known to not straddle a 32k boundary. This function is used
8712 to determine whether -mcmodel=medium code can use TOC pointer
8713 relative addressing for OP. This means the alignment of the TOC
8714 pointer must also be taken into account, and unfortunately that is
8715 only 8 bytes. */
8717 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8718 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8719 #endif
8721 static bool
8722 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8723 machine_mode mode)
8725 tree decl;
8726 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8728 if (GET_CODE (op) != SYMBOL_REF)
8729 return false;
8731 /* ISA 3.0 vector d-form addressing is restricted; don't allow
8732 SYMBOL_REF. */
8733 if (mode_supports_vsx_dform_quad (mode))
8734 return false;
8736 dsize = GET_MODE_SIZE (mode);
8737 decl = SYMBOL_REF_DECL (op);
8738 if (!decl)
8740 if (dsize == 0)
8741 return false;
8743 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8744 replacing memory addresses with an anchor plus offset. We
8745 could find the decl by rummaging around in the block->objects
8746 VEC for the given offset but that seems like too much work. */
8747 dalign = BITS_PER_UNIT;
8748 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8749 && SYMBOL_REF_ANCHOR_P (op)
8750 && SYMBOL_REF_BLOCK (op) != NULL)
8752 struct object_block *block = SYMBOL_REF_BLOCK (op);
8754 dalign = block->alignment;
8755 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8757 else if (CONSTANT_POOL_ADDRESS_P (op))
8759 /* It would be nice to have get_pool_align().. */
8760 machine_mode cmode = get_pool_mode (op);
8762 dalign = GET_MODE_ALIGNMENT (cmode);
8765 else if (DECL_P (decl))
8767 dalign = DECL_ALIGN (decl);
8769 if (dsize == 0)
8771 /* Allow BLKmode when the entire object is known to not
8772 cross a 32k boundary. */
8773 if (!DECL_SIZE_UNIT (decl))
8774 return false;
8776 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8777 return false;
8779 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8780 if (dsize > 32768)
8781 return false;
8783 dalign /= BITS_PER_UNIT;
8784 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8785 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8786 return dalign >= dsize;
8789 else
8790 gcc_unreachable ();
8792 /* Find how many bits of the alignment we know for this access. */
8793 dalign /= BITS_PER_UNIT;
8794 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8795 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8796 mask = dalign - 1;
8797 lsb = offset & -offset;
8798 mask &= lsb - 1;
8799 dalign = mask + 1;
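/* In effect DALIGN becomes the smaller of the symbol's known
   alignment and the lowest set bit of OFFSET (OFFSET & -OFFSET
   isolates that bit); a zero OFFSET leaves the alignment
   unchanged.  */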
8801 return dalign >= dsize;
8804 static bool
8805 constant_pool_expr_p (rtx op)
8807 rtx base, offset;
8809 split_const (op, &base, &offset);
8810 return (GET_CODE (base) == SYMBOL_REF
8811 && CONSTANT_POOL_ADDRESS_P (base)
8812 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8815 static const_rtx tocrel_base, tocrel_offset;
8817 /* Return true if OP is a toc pointer relative address (the output
8818 of create_TOC_reference). If STRICT, do not match non-split
8819 -mcmodel=large/medium toc pointer relative addresses. */
8821 bool
8822 toc_relative_expr_p (const_rtx op, bool strict)
8824 if (!TARGET_TOC)
8825 return false;
8827 if (TARGET_CMODEL != CMODEL_SMALL)
8829 /* When STRICT, ensure we have everything tidy. */
8830 if (strict
8831 && !(GET_CODE (op) == LO_SUM
8832 && REG_P (XEXP (op, 0))
8833 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8834 return false;
8836 /* When not strict, allow non-split TOC addresses and also allow
8837 (lo_sum (high ..)) TOC addresses created during reload. */
8838 if (GET_CODE (op) == LO_SUM)
8839 op = XEXP (op, 1);
8842 tocrel_base = op;
8843 tocrel_offset = const0_rtx;
8844 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8846 tocrel_base = XEXP (op, 0);
8847 tocrel_offset = XEXP (op, 1);
8850 return (GET_CODE (tocrel_base) == UNSPEC
8851 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8854 /* Return true if X is a constant pool address, and also for cmodel=medium
8855 if X is a toc-relative address known to be offsettable within MODE. */
8857 bool
8858 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8859 bool strict)
8861 return (toc_relative_expr_p (x, strict)
8862 && (TARGET_CMODEL != CMODEL_MEDIUM
8863 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8864 || mode == QImode
8865 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8866 INTVAL (tocrel_offset), mode)));
8869 static bool
8870 legitimate_small_data_p (machine_mode mode, rtx x)
8872 return (DEFAULT_ABI == ABI_V4
8873 && !flag_pic && !TARGET_TOC
8874 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8875 && small_data_operand (x, mode));
8878 /* SPE offset addressing is limited to 5-bits worth of double words. */
8879 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
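/* I.e. the offset must be a non-negative multiple of 8 no larger than
   248, since only the bits within 0xf8 may be set.  */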
8881 bool
8882 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8883 bool strict, bool worst_case)
8885 unsigned HOST_WIDE_INT offset;
8886 unsigned int extra;
8888 if (GET_CODE (x) != PLUS)
8889 return false;
8890 if (!REG_P (XEXP (x, 0)))
8891 return false;
8892 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8893 return false;
8894 if (mode_supports_vsx_dform_quad (mode))
8895 return quad_address_p (x, mode, strict);
8896 if (!reg_offset_addressing_ok_p (mode))
8897 return virtual_stack_registers_memory_p (x);
8898 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8899 return true;
8900 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8901 return false;
8903 offset = INTVAL (XEXP (x, 1));
8904 extra = 0;
8905 switch (mode)
8907 case V4HImode:
8908 case V2SImode:
8909 case V1DImode:
8910 case V2SFmode:
8911 /* SPE vector modes. */
8912 return SPE_CONST_OFFSET_OK (offset);
8914 case DFmode:
8915 case DDmode:
8916 case DImode:
8917 /* On e500v2, we may have:
8919 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
8921 Which gets addressed with evldd instructions. */
8922 if (TARGET_E500_DOUBLE)
8923 return SPE_CONST_OFFSET_OK (offset);
8925 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8926 addressing. */
8927 if (VECTOR_MEM_VSX_P (mode))
8928 return false;
8930 if (!worst_case)
8931 break;
8932 if (!TARGET_POWERPC64)
8933 extra = 4;
8934 else if (offset & 3)
8935 return false;
8936 break;
8938 case TFmode:
8939 case IFmode:
8940 case KFmode:
8941 case TDmode:
8942 case TImode:
8943 case PTImode:
8944 if (TARGET_E500_DOUBLE)
8945 return (SPE_CONST_OFFSET_OK (offset)
8946 && SPE_CONST_OFFSET_OK (offset + 8));
8948 extra = 8;
8949 if (!worst_case)
8950 break;
8951 if (!TARGET_POWERPC64)
8952 extra = 12;
8953 else if (offset & 3)
8954 return false;
8955 break;
8957 default:
8958 break;
8961 offset += 0x8000;
8962 return offset < 0x10000 - extra;
8965 bool
8966 legitimate_indexed_address_p (rtx x, int strict)
8968 rtx op0, op1;
8970 if (GET_CODE (x) != PLUS)
8971 return false;
8973 op0 = XEXP (x, 0);
8974 op1 = XEXP (x, 1);
8976 /* Recognize the rtl generated by reload which we know will later be
8977 replaced with proper base and index regs. */
8978 if (!strict
8979 && reload_in_progress
8980 && (REG_P (op0) || GET_CODE (op0) == PLUS)
8981 && REG_P (op1))
8982 return true;
8984 return (REG_P (op0) && REG_P (op1)
8985 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8986 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8987 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8988 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8991 bool
8992 avoiding_indexed_address_p (machine_mode mode)
8994 /* Avoid indexed addressing for modes that have non-indexed
8995 load/store instruction forms. */
8996 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8999 bool
9000 legitimate_indirect_address_p (rtx x, int strict)
9002 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
9005 bool
9006 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
9008 if (!TARGET_MACHO || !flag_pic
9009 || mode != SImode || GET_CODE (x) != MEM)
9010 return false;
9011 x = XEXP (x, 0);
9013 if (GET_CODE (x) != LO_SUM)
9014 return false;
9015 if (GET_CODE (XEXP (x, 0)) != REG)
9016 return false;
9017 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
9018 return false;
9019 x = XEXP (x, 1);
9021 return CONSTANT_P (x);
9024 static bool
9025 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
9027 if (GET_CODE (x) != LO_SUM)
9028 return false;
9029 if (GET_CODE (XEXP (x, 0)) != REG)
9030 return false;
9031 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
9032 return false;
9033 /* Quad word addresses are restricted; we can't use LO_SUM. */
9034 if (mode_supports_vsx_dform_quad (mode))
9035 return false;
9036 /* Restrict addressing for DI because of our SUBREG hackery. */
9037 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
9038 return false;
9039 x = XEXP (x, 1);
9041 if (TARGET_ELF || TARGET_MACHO)
9043 bool large_toc_ok;
9045 if (DEFAULT_ABI == ABI_V4 && flag_pic)
9046 return false;
9047 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
9048 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
9049 recognizes some LO_SUM addresses as valid although this
9050 function says the opposite. In most cases, LRA can generate correct
9051 code for address reloads through other transformations, but it
9052 cannot manage some LO_SUM cases. So we need to add code here,
9053 analogous to that in rs6000_legitimize_reload_address for LO_SUM,
9054 saying that some addresses are still valid. */
9055 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
9056 && small_toc_ref (x, VOIDmode));
9057 if (TARGET_TOC && ! large_toc_ok)
9058 return false;
9059 if (GET_MODE_NUNITS (mode) != 1)
9060 return false;
9061 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
9062 && !(/* ??? Assume floating point reg based on mode? */
9063 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
9064 && (mode == DFmode || mode == DDmode)))
9065 return false;
9067 return CONSTANT_P (x) || large_toc_ok;
9070 return false;
9074 /* Try machine-dependent ways of modifying an illegitimate address
9075 to be legitimate. If we find one, return the new, valid address.
9076 This is used from only one place: `memory_address' in explow.c.
9078 OLDX is the address as it was before break_out_memory_refs was
9079 called. In some cases it is useful to look at this to decide what
9080 needs to be done.
9082 It is always safe for this function to do nothing. It exists to
9083 recognize opportunities to optimize the output.
9085 On RS/6000, first check for the sum of a register with a constant
9086 integer that is out of range. If so, generate code to add the
9087 constant with the low-order 16 bits masked to the register and force
9088 this result into another register (this can be done with `cau').
9089 Then generate an address of REG+(CONST&0xffff), allowing for the
9090 possibility of bit 16 being a one.
9092 Then check for the sum of a register and something that is not
9093 constant; try to load the other operand into a register and return the sum. */
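/* A worked example (a sketch; the exact cut-off also depends on the
   EXTRA slack computed below for multi-register modes): legitimizing
   (plus:DI (reg 3) (const_int 0x12345678)) splits the displacement as

     low_int  = ((0x5678 ^ 0x8000) - 0x8000) = 0x5678
     high_int = 0x12345678 - 0x5678 = 0x12340000

   so we emit "addis tmp,3,0x1234" and return (plus tmp 0x5678).  When
   bit 16 of the constant is set, e.g. 0x1234abcd, low_int becomes the
   negative value -0x5433 and high_int rounds up to 0x12350000, still a
   16-bit addis immediate (0x1235).  */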
9095 static rtx
9096 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
9097 machine_mode mode)
9099 unsigned int extra;
9101 if (!reg_offset_addressing_ok_p (mode)
9102 || mode_supports_vsx_dform_quad (mode))
9104 if (virtual_stack_registers_memory_p (x))
9105 return x;
9107 /* In theory we should not be seeing addresses of the form reg+0,
9108 but just in case it is generated, optimize it away. */
9109 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
9110 return force_reg (Pmode, XEXP (x, 0));
9112 /* For TImode with load/store quad, restrict addresses to just a single
9113 pointer, so it works with both GPRs and VSX registers. */
9114 /* Make sure both operands are registers. */
9115 else if (GET_CODE (x) == PLUS
9116 && (mode != TImode || !TARGET_VSX_TIMODE))
9117 return gen_rtx_PLUS (Pmode,
9118 force_reg (Pmode, XEXP (x, 0)),
9119 force_reg (Pmode, XEXP (x, 1)));
9120 else
9121 return force_reg (Pmode, x);
9123 if (GET_CODE (x) == SYMBOL_REF)
9125 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
9126 if (model != 0)
9127 return rs6000_legitimize_tls_address (x, model);
9130 extra = 0;
9131 switch (mode)
9133 case TFmode:
9134 case TDmode:
9135 case TImode:
9136 case PTImode:
9137 case IFmode:
9138 case KFmode:
9139 /* As in legitimate_offset_address_p we do not assume
9140 worst-case. The mode here is just a hint as to the registers
9141 used. A TImode is usually in gprs, but may actually be in
9142 fprs. Leave worst-case scenario for reload to handle via
9143 insn constraints. PTImode is only GPRs. */
9144 extra = 8;
9145 break;
9146 default:
9147 break;
9150 if (GET_CODE (x) == PLUS
9151 && GET_CODE (XEXP (x, 0)) == REG
9152 && GET_CODE (XEXP (x, 1)) == CONST_INT
9153 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
9154 >= 0x10000 - extra)
9155 && !(SPE_VECTOR_MODE (mode)
9156 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
9158 HOST_WIDE_INT high_int, low_int;
9159 rtx sum;
9160 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
9161 if (low_int >= 0x8000 - extra)
9162 low_int = 0;
9163 high_int = INTVAL (XEXP (x, 1)) - low_int;
9164 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
9165 GEN_INT (high_int)), 0);
9166 return plus_constant (Pmode, sum, low_int);
9168 else if (GET_CODE (x) == PLUS
9169 && GET_CODE (XEXP (x, 0)) == REG
9170 && GET_CODE (XEXP (x, 1)) != CONST_INT
9171 && GET_MODE_NUNITS (mode) == 1
9172 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
9173 || (/* ??? Assume floating point reg based on mode? */
9174 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9175 && (mode == DFmode || mode == DDmode)))
9176 && !avoiding_indexed_address_p (mode))
9178 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
9179 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
9181 else if (SPE_VECTOR_MODE (mode)
9182 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
9184 if (mode == DImode)
9185 return x;
9186 /* We accept [reg + reg] and [reg + OFFSET]. */
9188 if (GET_CODE (x) == PLUS)
9190 rtx op1 = XEXP (x, 0);
9191 rtx op2 = XEXP (x, 1);
9192 rtx y;
9194 op1 = force_reg (Pmode, op1);
9196 if (GET_CODE (op2) != REG
9197 && (GET_CODE (op2) != CONST_INT
9198 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
9199 || (GET_MODE_SIZE (mode) > 8
9200 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
9201 op2 = force_reg (Pmode, op2);
9203 /* We can't always do [reg + reg] for these, because [reg +
9204 reg + offset] is not a legitimate addressing mode. */
9205 y = gen_rtx_PLUS (Pmode, op1, op2);
9207 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
9208 return force_reg (Pmode, y);
9209 else
9210 return y;
9213 return force_reg (Pmode, x);
9215 else if ((TARGET_ELF
9216 #if TARGET_MACHO
9217 || !MACHO_DYNAMIC_NO_PIC_P
9218 #endif
9220 && TARGET_32BIT
9221 && TARGET_NO_TOC
9222 && ! flag_pic
9223 && GET_CODE (x) != CONST_INT
9224 && GET_CODE (x) != CONST_WIDE_INT
9225 && GET_CODE (x) != CONST_DOUBLE
9226 && CONSTANT_P (x)
9227 && GET_MODE_NUNITS (mode) == 1
9228 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
9229 || (/* ??? Assume floating point reg based on mode? */
9230 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9231 && (mode == DFmode || mode == DDmode))))
9233 rtx reg = gen_reg_rtx (Pmode);
9234 if (TARGET_ELF)
9235 emit_insn (gen_elf_high (reg, x));
9236 else
9237 emit_insn (gen_macho_high (reg, x));
9238 return gen_rtx_LO_SUM (Pmode, reg, x);
9240 else if (TARGET_TOC
9241 && GET_CODE (x) == SYMBOL_REF
9242 && constant_pool_expr_p (x)
9243 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
9244 return create_TOC_reference (x, NULL_RTX);
9245 else
9246 return x;
9249 /* Debug version of rs6000_legitimize_address. */
9250 static rtx
9251 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
9253 rtx ret;
9254 rtx_insn *insns;
9256 start_sequence ();
9257 ret = rs6000_legitimize_address (x, oldx, mode);
9258 insns = get_insns ();
9259 end_sequence ();
9261 if (ret != x)
9263 fprintf (stderr,
9264 "\nrs6000_legitimize_address: mode %s, old code %s, "
9265 "new code %s, modified\n",
9266 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
9267 GET_RTX_NAME (GET_CODE (ret)));
9269 fprintf (stderr, "Original address:\n");
9270 debug_rtx (x);
9272 fprintf (stderr, "oldx:\n");
9273 debug_rtx (oldx);
9275 fprintf (stderr, "New address:\n");
9276 debug_rtx (ret);
9278 if (insns)
9280 fprintf (stderr, "Insns added:\n");
9281 debug_rtx_list (insns, 20);
9284 else
9286 fprintf (stderr,
9287 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
9288 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
9290 debug_rtx (x);
9293 if (insns)
9294 emit_insn (insns);
9296 return ret;
9299 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9300 We need to emit DTP-relative relocations. */
9302 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
9303 static void
9304 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
9306 switch (size)
9308 case 4:
9309 fputs ("\t.long\t", file);
9310 break;
9311 case 8:
9312 fputs (DOUBLE_INT_ASM_OP, file);
9313 break;
9314 default:
9315 gcc_unreachable ();
9317 output_addr_const (file, x);
9318 if (TARGET_ELF)
9319 fputs ("@dtprel+0x8000", file);
9320 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
9322 switch (SYMBOL_REF_TLS_MODEL (x))
9324 case 0:
9325 break;
9326 case TLS_MODEL_LOCAL_EXEC:
9327 fputs ("@le", file);
9328 break;
9329 case TLS_MODEL_INITIAL_EXEC:
9330 fputs ("@ie", file);
9331 break;
9332 case TLS_MODEL_GLOBAL_DYNAMIC:
9333 case TLS_MODEL_LOCAL_DYNAMIC:
9334 fputs ("@m", file);
9335 break;
9336 default:
9337 gcc_unreachable ();
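/* Illustrative output (a sketch): on 64-bit ELF, where
   DOUBLE_INT_ASM_OP is "\t.quad\t", a size-8 entry for symbol x comes
   out as ".quad x@dtprel+0x8000", the +0x8000 matching the bias the
   PowerPC TLS ABI applies to DTP-relative offsets.  */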
9342 /* Return true if X is a symbol that refers to real (rather than emulated)
9343 TLS. */
9345 static bool
9346 rs6000_real_tls_symbol_ref_p (rtx x)
9348 return (GET_CODE (x) == SYMBOL_REF
9349 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
9352 /* In the name of slightly smaller debug output, and to cater to
9353 general assembler lossage, recognize various UNSPEC sequences
9354 and turn them back into a direct symbol reference. */
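/* For instance (an illustrative sketch), a medium-model TOC reference

     (mem (lo_sum (reg 2)
                  (plus (unspec [(symbol_ref "x")] UNSPEC_TOCREL)
                        (const_int 8))))

   is rewritten below to the equivalent (mem (plus (symbol_ref "x")
   (const_int 8))), so the debug info refers to "x" directly.  */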
9356 static rtx
9357 rs6000_delegitimize_address (rtx orig_x)
9359 rtx x, y, offset;
9361 orig_x = delegitimize_mem_from_attrs (orig_x);
9362 x = orig_x;
9363 if (MEM_P (x))
9364 x = XEXP (x, 0);
9366 y = x;
9367 if (TARGET_CMODEL != CMODEL_SMALL
9368 && GET_CODE (y) == LO_SUM)
9369 y = XEXP (y, 1);
9371 offset = NULL_RTX;
9372 if (GET_CODE (y) == PLUS
9373 && GET_MODE (y) == Pmode
9374 && CONST_INT_P (XEXP (y, 1)))
9376 offset = XEXP (y, 1);
9377 y = XEXP (y, 0);
9380 if (GET_CODE (y) == UNSPEC
9381 && XINT (y, 1) == UNSPEC_TOCREL)
9383 y = XVECEXP (y, 0, 0);
9385 #ifdef HAVE_AS_TLS
9386 /* Do not associate thread-local symbols with the original
9387 constant pool symbol. */
9388 if (TARGET_XCOFF
9389 && GET_CODE (y) == SYMBOL_REF
9390 && CONSTANT_POOL_ADDRESS_P (y)
9391 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9392 return orig_x;
9393 #endif
9395 if (offset != NULL_RTX)
9396 y = gen_rtx_PLUS (Pmode, y, offset);
9397 if (!MEM_P (orig_x))
9398 return y;
9399 else
9400 return replace_equiv_address_nv (orig_x, y);
9403 if (TARGET_MACHO
9404 && GET_CODE (orig_x) == LO_SUM
9405 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9407 y = XEXP (XEXP (orig_x, 1), 0);
9408 if (GET_CODE (y) == UNSPEC
9409 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9410 return XVECEXP (y, 0, 0);
9413 return orig_x;
9416 /* Return true if X shouldn't be emitted into the debug info.
9417 The linker doesn't like .toc section references from
9418 .debug_* sections, so reject .toc section symbols. */
9420 static bool
9421 rs6000_const_not_ok_for_debug_p (rtx x)
9423 if (GET_CODE (x) == SYMBOL_REF
9424 && CONSTANT_POOL_ADDRESS_P (x))
9426 rtx c = get_pool_constant (x);
9427 machine_mode cmode = get_pool_mode (x);
9428 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9429 return true;
9432 return false;
9436 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9438 static bool
9439 rs6000_legitimate_combined_insn (rtx_insn *insn)
9441 int icode = INSN_CODE (insn);
9443 /* Reject creating doloop insns. Combine should not be allowed
9444 to create these for a number of reasons:
9445 1) In a nested loop, if combine creates one of these in an
9446 outer loop and the register allocator happens to allocate ctr
9447 to the outer loop insn, then the inner loop can't use ctr.
9448 Inner loops ought to be more highly optimized.
9449 2) Combine often wants to create one of these from what was
9450 originally a three insn sequence, first combining the three
9451 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9452 allocated ctr, the splitter takes us back to the three insn
9453 sequence. It's better to stop combine at the two insn
9454 sequence.
9455 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9456 insns, the register allocator sometimes uses floating point
9457 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9458 jump insn and output reloads are not implemented for jumps,
9459 the ctrsi/ctrdi splitters need to handle all possible cases.
9460 That's a pain, and it gets to be seriously difficult when a
9461 splitter that runs after reload needs memory to transfer from
9462 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9463 for the difficult case. It's better to not create problems
9464 in the first place. */
9465 if (icode != CODE_FOR_nothing
9466 && (icode == CODE_FOR_ctrsi_internal1
9467 || icode == CODE_FOR_ctrdi_internal1
9468 || icode == CODE_FOR_ctrsi_internal2
9469 || icode == CODE_FOR_ctrdi_internal2
9470 || icode == CODE_FOR_ctrsi_internal3
9471 || icode == CODE_FOR_ctrdi_internal3
9472 || icode == CODE_FOR_ctrsi_internal4
9473 || icode == CODE_FOR_ctrdi_internal4))
9474 return false;
9476 return true;
9479 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9481 static GTY(()) rtx rs6000_tls_symbol;
9482 static rtx
9483 rs6000_tls_get_addr (void)
9485 if (!rs6000_tls_symbol)
9486 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9488 return rs6000_tls_symbol;
9491 /* Construct the SYMBOL_REF for TLS GOT references. */
9493 static GTY(()) rtx rs6000_got_symbol;
9494 static rtx
9495 rs6000_got_sym (void)
9497 if (!rs6000_got_symbol)
9499 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9500 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9501 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9504 return rs6000_got_symbol;
9507 /* AIX Thread-Local Address support. */
9509 static rtx
9510 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9512 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9513 const char *name;
9514 char *tlsname;
9516 name = XSTR (addr, 0);
9517 /* Append the TLS CSECT qualifier, unless the symbol is already
9518 qualified or will be placed in the TLS private data section. */
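/* E.g. (illustrative) a public symbol "foo" gains the qualifier
   "foo[TL]", a bss-initialized one becomes "foo[UL]", and a name
   already ending in ']' is left alone.  */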
9519 if (name[strlen (name) - 1] != ']'
9520 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9521 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9523 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* "[TL]"/"[UL]" + NUL. */
9524 strcpy (tlsname, name);
9525 strcat (tlsname,
9526 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9527 tlsaddr = copy_rtx (addr);
9528 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9530 else
9531 tlsaddr = addr;
9533 /* Place addr into TOC constant pool. */
9534 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9536 /* Output the TOC entry and create the MEM referencing the value. */
9537 if (constant_pool_expr_p (XEXP (sym, 0))
9538 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9540 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9541 mem = gen_const_mem (Pmode, tocref);
9542 set_mem_alias_set (mem, get_TOC_alias_set ());
9544 else
9545 return sym;
9547 /* Use global-dynamic for local-dynamic. */
9548 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9549 || model == TLS_MODEL_LOCAL_DYNAMIC)
9551 /* Create new TOC reference for @m symbol. */
9552 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9553 tlsname = XALLOCAVEC (char, strlen (name) + 2); /* "*LC" -> "*LCM" + NUL. */
9554 strcpy (tlsname, "*LCM");
9555 strcat (tlsname, name + 3);
9556 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9557 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9558 tocref = create_TOC_reference (modaddr, NULL_RTX);
9559 rtx modmem = gen_const_mem (Pmode, tocref);
9560 set_mem_alias_set (modmem, get_TOC_alias_set ());
9562 rtx modreg = gen_reg_rtx (Pmode);
9563 emit_insn (gen_rtx_SET (modreg, modmem));
9565 tmpreg = gen_reg_rtx (Pmode);
9566 emit_insn (gen_rtx_SET (tmpreg, mem));
9568 dest = gen_reg_rtx (Pmode);
9569 if (TARGET_32BIT)
9570 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9571 else
9572 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9573 return dest;
9575 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9576 else if (TARGET_32BIT)
9578 tlsreg = gen_reg_rtx (SImode);
9579 emit_insn (gen_tls_get_tpointer (tlsreg));
9581 else
9582 tlsreg = gen_rtx_REG (DImode, 13);
9584 /* Load the TOC value into temporary register. */
9585 tmpreg = gen_reg_rtx (Pmode);
9586 emit_insn (gen_rtx_SET (tmpreg, mem));
9587 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9588 gen_rtx_MINUS (Pmode, addr, tlsreg));
9590 /* Add TOC symbol value to TLS pointer. */
9591 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9593 return dest;
9596 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9597 this (thread-local) address. */
9599 static rtx
9600 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9602 rtx dest, insn;
9604 if (TARGET_XCOFF)
9605 return rs6000_legitimize_tls_address_aix (addr, model);
9607 dest = gen_reg_rtx (Pmode);
9608 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9610 rtx tlsreg;
9612 if (TARGET_64BIT)
9614 tlsreg = gen_rtx_REG (Pmode, 13);
9615 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9617 else
9619 tlsreg = gen_rtx_REG (Pmode, 2);
9620 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9622 emit_insn (insn);
9624 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9626 rtx tlsreg, tmp;
9628 tmp = gen_reg_rtx (Pmode);
9629 if (TARGET_64BIT)
9631 tlsreg = gen_rtx_REG (Pmode, 13);
9632 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9634 else
9636 tlsreg = gen_rtx_REG (Pmode, 2);
9637 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9639 emit_insn (insn);
9640 if (TARGET_64BIT)
9641 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9642 else
9643 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9644 emit_insn (insn);
9646 else
9648 rtx r3, got, tga, tmp1, tmp2, call_insn;
9650 /* We currently use relocations like @got@tlsgd for tls, which
9651 means the linker will handle allocation of tls entries, placing
9652 them in the .got section. So use a pointer to the .got section,
9653 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9654 or to secondary GOT sections used by 32-bit -fPIC. */
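/* A sketch of what this expands to for global-dynamic TLS on 64-bit
   ELF with the medium code model:

     addis 3,2,x@got@tlsgd@ha
     addi  3,3,x@got@tlsgd@l
     bl    __tls_get_addr(x@tlsgd)
     nop

   leaving the linker free to relax the sequence to a cheaper TLS
   model.  */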
9655 if (TARGET_64BIT)
9656 got = gen_rtx_REG (Pmode, 2);
9657 else
9659 if (flag_pic == 1)
9660 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9661 else
9663 rtx gsym = rs6000_got_sym ();
9664 got = gen_reg_rtx (Pmode);
9665 if (flag_pic == 0)
9666 rs6000_emit_move (got, gsym, Pmode);
9667 else
9669 rtx mem, lab;
9671 tmp1 = gen_reg_rtx (Pmode);
9672 tmp2 = gen_reg_rtx (Pmode);
9673 mem = gen_const_mem (Pmode, tmp1);
9674 lab = gen_label_rtx ();
9675 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9676 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9677 if (TARGET_LINK_STACK)
9678 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9679 emit_move_insn (tmp2, mem);
9680 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9681 set_unique_reg_note (last, REG_EQUAL, gsym);
9686 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9688 tga = rs6000_tls_get_addr ();
9689 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9690 1, const0_rtx, Pmode);
9692 r3 = gen_rtx_REG (Pmode, 3);
9693 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9695 if (TARGET_64BIT)
9696 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9697 else
9698 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9700 else if (DEFAULT_ABI == ABI_V4)
9701 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9702 else
9703 gcc_unreachable ();
9704 call_insn = last_call_insn ();
9705 PATTERN (call_insn) = insn;
9706 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9707 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9708 pic_offset_table_rtx);
9710 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9712 tga = rs6000_tls_get_addr ();
9713 tmp1 = gen_reg_rtx (Pmode);
9714 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9715 1, const0_rtx, Pmode);
9717 r3 = gen_rtx_REG (Pmode, 3);
9718 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9720 if (TARGET_64BIT)
9721 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9722 else
9723 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9725 else if (DEFAULT_ABI == ABI_V4)
9726 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9727 else
9728 gcc_unreachable ();
9729 call_insn = last_call_insn ();
9730 PATTERN (call_insn) = insn;
9731 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9732 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9733 pic_offset_table_rtx);
9735 if (rs6000_tls_size == 16)
9737 if (TARGET_64BIT)
9738 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9739 else
9740 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9742 else if (rs6000_tls_size == 32)
9744 tmp2 = gen_reg_rtx (Pmode);
9745 if (TARGET_64BIT)
9746 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9747 else
9748 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9749 emit_insn (insn);
9750 if (TARGET_64BIT)
9751 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9752 else
9753 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9755 else
9757 tmp2 = gen_reg_rtx (Pmode);
9758 if (TARGET_64BIT)
9759 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9760 else
9761 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9762 emit_insn (insn);
9763 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9765 emit_insn (insn);
9767 else
9769 /* IE, or 64-bit offset LE. */
9770 tmp2 = gen_reg_rtx (Pmode);
9771 if (TARGET_64BIT)
9772 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9773 else
9774 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9775 emit_insn (insn);
9776 if (TARGET_64BIT)
9777 insn = gen_tls_tls_64 (dest, tmp2, addr);
9778 else
9779 insn = gen_tls_tls_32 (dest, tmp2, addr);
9780 emit_insn (insn);
9784 return dest;
9787 /* Only create the global variable for the stack protect guard if we are using
9788 the global flavor of that guard. */
9789 static tree
9790 rs6000_init_stack_protect_guard (void)
9792 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9793 return default_stack_protect_guard ();
9795 return NULL_TREE;
9798 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9800 static bool
9801 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9803 if (GET_CODE (x) == HIGH
9804 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9805 return true;
9807 /* A TLS symbol in the TOC cannot contain a sum. */
9808 if (GET_CODE (x) == CONST
9809 && GET_CODE (XEXP (x, 0)) == PLUS
9810 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9811 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9812 return true;
9814 /* Do not place an ELF TLS symbol in the constant pool. */
9815 return TARGET_ELF && tls_referenced_p (x);
9818 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9819 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9820 can be addressed relative to the toc pointer. */
9822 static bool
9823 use_toc_relative_ref (rtx sym, machine_mode mode)
9825 return ((constant_pool_expr_p (sym)
9826 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9827 get_pool_mode (sym)))
9828 || (TARGET_CMODEL == CMODEL_MEDIUM
9829 && SYMBOL_REF_LOCAL_P (sym)
9830 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9833 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9834 replace the input X, or the original X if no replacement is called for.
9835 The output parameter *WIN is 1 if the calling macro should goto WIN,
9836 0 if it should not.
9838 For RS/6000, we wish to handle large displacements off a base
9839 register by splitting the addend across an addi/addis and the mem insn.
9840 This cuts the number of extra insns needed from 3 to 1.
9842 On Darwin, we use this to generate code for floating point constants.
9843 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9844 The Darwin code is inside #if TARGET_MACHO because only then are the
9845 machopic_* functions defined. */
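/* A sketch of the payoff: reloading the address in
   (mem:DF (plus:DI (reg 3) (const_int 0x12340))) pushes a reload of
   (plus (reg 3) (const_int 0x10000)) into a base register, giving

     addis tmp,3,0x1
     lfd   1,0x2340(tmp)

   rather than materializing the whole constant separately first.  */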
9846 static rtx
9847 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9848 int opnum, int type,
9849 int ind_levels ATTRIBUTE_UNUSED, int *win)
9851 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9852 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9854 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9855 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9856 if (reg_offset_p
9857 && opnum == 1
9858 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9859 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9860 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9861 && TARGET_P9_VECTOR)
9862 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9863 && TARGET_P9_VECTOR)))
9864 reg_offset_p = false;
9866 /* We must recognize output that we have already generated ourselves. */
9867 if (GET_CODE (x) == PLUS
9868 && GET_CODE (XEXP (x, 0)) == PLUS
9869 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9870 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9871 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9873 if (TARGET_DEBUG_ADDR)
9875 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9876 debug_rtx (x);
9878 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9879 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9880 opnum, (enum reload_type) type);
9881 *win = 1;
9882 return x;
9885 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9886 if (GET_CODE (x) == LO_SUM
9887 && GET_CODE (XEXP (x, 0)) == HIGH)
9889 if (TARGET_DEBUG_ADDR)
9891 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9892 debug_rtx (x);
9894 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9895 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9896 opnum, (enum reload_type) type);
9897 *win = 1;
9898 return x;
9901 #if TARGET_MACHO
9902 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9903 && GET_CODE (x) == LO_SUM
9904 && GET_CODE (XEXP (x, 0)) == PLUS
9905 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9906 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9907 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9908 && machopic_operand_p (XEXP (x, 1)))
9910 /* Result of a previous invocation of this function on a Darwin
9911 floating point constant. */
9912 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9913 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9914 opnum, (enum reload_type) type);
9915 *win = 1;
9916 return x;
9918 #endif
9920 if (TARGET_CMODEL != CMODEL_SMALL
9921 && reg_offset_p
9922 && !quad_offset_p
9923 && small_toc_ref (x, VOIDmode))
9925 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9926 x = gen_rtx_LO_SUM (Pmode, hi, x);
9927 if (TARGET_DEBUG_ADDR)
9929 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9930 debug_rtx (x);
9932 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9933 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9934 opnum, (enum reload_type) type);
9935 *win = 1;
9936 return x;
9939 if (GET_CODE (x) == PLUS
9940 && REG_P (XEXP (x, 0))
9941 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9942 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9943 && CONST_INT_P (XEXP (x, 1))
9944 && reg_offset_p
9945 && !SPE_VECTOR_MODE (mode)
9946 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
9947 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9949 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9950 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9951 HOST_WIDE_INT high
9952 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9954 /* Check for 32-bit overflow or quad addresses with one of the
9955 four least significant bits set. */
9956 if (high + low != val
9957 || (quad_offset_p && (low & 0xf)))
9959 *win = 0;
9960 return x;
9963 /* Reload the high part into a base reg; leave the low part
9964 in the mem directly. */
9966 x = gen_rtx_PLUS (GET_MODE (x),
9967 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9968 GEN_INT (high)),
9969 GEN_INT (low));
9971 if (TARGET_DEBUG_ADDR)
9973 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9974 debug_rtx (x);
9976 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9977 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9978 opnum, (enum reload_type) type);
9979 *win = 1;
9980 return x;
9983 if (GET_CODE (x) == SYMBOL_REF
9984 && reg_offset_p
9985 && !quad_offset_p
9986 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9987 && !SPE_VECTOR_MODE (mode)
9988 #if TARGET_MACHO
9989 && DEFAULT_ABI == ABI_DARWIN
9990 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9991 && machopic_symbol_defined_p (x)
9992 #else
9993 && DEFAULT_ABI == ABI_V4
9994 && !flag_pic
9995 #endif
9996 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9997 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9998 without fprs.
9999 ??? Assume floating point reg based on mode? This assumption is
10000 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
10001 where reload ends up doing a DFmode load of a constant from
10002 mem using two gprs. Unfortunately, at this point reload
10003 hasn't yet selected regs so poking around in reload data
10004 won't help and even if we could figure out the regs reliably,
10005 we'd still want to allow this transformation when the mem is
10006 naturally aligned. Since we say the address is good here, we
10007 can't disable offsets from LO_SUMs in mem_operand_gpr.
10008 FIXME: Allow offset from lo_sum for other modes too, when
10009 mem is sufficiently aligned.
10011 Also disallow this if the type can go in VMX/Altivec registers, since
10012 those registers do not have d-form (reg+offset) address modes. */
10013 && !reg_addr[mode].scalar_in_vmx_p
10014 && mode != TFmode
10015 && mode != TDmode
10016 && mode != IFmode
10017 && mode != KFmode
10018 && (mode != TImode || !TARGET_VSX_TIMODE)
10019 && mode != PTImode
10020 && (mode != DImode || TARGET_POWERPC64)
10021 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
10022 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
10024 #if TARGET_MACHO
10025 if (flag_pic)
10027 rtx offset = machopic_gen_offset (x);
10028 x = gen_rtx_LO_SUM (GET_MODE (x),
10029 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
10030 gen_rtx_HIGH (Pmode, offset)), offset);
10032 else
10033 #endif
10034 x = gen_rtx_LO_SUM (GET_MODE (x),
10035 gen_rtx_HIGH (Pmode, x), x);
10037 if (TARGET_DEBUG_ADDR)
10039 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
10040 debug_rtx (x);
10042 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10043 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10044 opnum, (enum reload_type) type);
10045 *win = 1;
10046 return x;
10049 /* Reload an offset address wrapped by an AND that represents the
10050 masking of the lower bits. Strip the outer AND and let reload
10051 convert the offset address into an indirect address. For VSX,
10052 force reload to create the address with an AND in a separate
10053 register, because we can't guarantee an altivec register will
10054 be used. */
10055 if (VECTOR_MEM_ALTIVEC_P (mode)
10056 && GET_CODE (x) == AND
10057 && GET_CODE (XEXP (x, 0)) == PLUS
10058 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
10059 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
10060 && GET_CODE (XEXP (x, 1)) == CONST_INT
10061 && INTVAL (XEXP (x, 1)) == -16)
10063 x = XEXP (x, 0);
10064 *win = 1;
10065 return x;
10068 if (TARGET_TOC
10069 && reg_offset_p
10070 && !quad_offset_p
10071 && GET_CODE (x) == SYMBOL_REF
10072 && use_toc_relative_ref (x, mode))
10074 x = create_TOC_reference (x, NULL_RTX);
10075 if (TARGET_CMODEL != CMODEL_SMALL)
10077 if (TARGET_DEBUG_ADDR)
10079 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
10080 debug_rtx (x);
10082 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10083 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10084 opnum, (enum reload_type) type);
10086 *win = 1;
10087 return x;
10089 *win = 0;
10090 return x;
10093 /* Debug version of rs6000_legitimize_reload_address. */
10094 static rtx
10095 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
10096 int opnum, int type,
10097 int ind_levels, int *win)
10099 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
10100 ind_levels, win);
10101 fprintf (stderr,
10102 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
10103 "type = %d, ind_levels = %d, win = %d, original addr:\n",
10104 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
10105 debug_rtx (x);
10107 if (x == ret)
10108 fprintf (stderr, "Same address returned\n");
10109 else if (!ret)
10110 fprintf (stderr, "NULL returned\n");
10111 else
10113 fprintf (stderr, "New address:\n");
10114 debug_rtx (ret);
10117 return ret;
10120 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
10121 that is a valid memory address for an instruction.
10122 The MODE argument is the machine mode for the MEM expression
10123 that wants to use this address.
10125 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
10126 refers to a constant pool entry of an address (or the sum of it
10127 plus a constant), a short (16-bit signed) constant plus a register,
10128 the sum of two registers, or a register indirect, possibly with an
10129 auto-increment. For DFmode, DDmode and DImode with a constant plus
10130 register, we must ensure that both words are addressable, or on
10131 PowerPC64 that the offset is word aligned.
10133 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
10134 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
10135 because adjacent memory cells are accessed by adding word-sized offsets
10136 during assembly output. */
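/* A few concrete SImode examples (a sketch, assuming TARGET_UPDATE):
   (mem:SI (reg 3)), (mem:SI (plus (reg 3) (const_int 4))),
   (mem:SI (plus (reg 3) (reg 4))) and (mem:SI (pre_inc (reg 3))) are
   all accepted below, while AltiVec/VSX vector modes reject the
   reg+offset form unless the ISA 3.0 d-form checks allow it.  */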
10137 static bool
10138 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
10140 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
10141 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
10143 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
10144 if (VECTOR_MEM_ALTIVEC_P (mode)
10145 && GET_CODE (x) == AND
10146 && GET_CODE (XEXP (x, 1)) == CONST_INT
10147 && INTVAL (XEXP (x, 1)) == -16)
10148 x = XEXP (x, 0);
10150 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
10151 return 0;
10152 if (legitimate_indirect_address_p (x, reg_ok_strict))
10153 return 1;
10154 if (TARGET_UPDATE
10155 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
10156 && mode_supports_pre_incdec_p (mode)
10157 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
10158 return 1;
10159 /* Handle restricted vector d-form offsets in ISA 3.0. */
10160 if (quad_offset_p)
10162 if (quad_address_p (x, mode, reg_ok_strict))
10163 return 1;
10165 else if (virtual_stack_registers_memory_p (x))
10166 return 1;
10168 else if (reg_offset_p)
10170 if (legitimate_small_data_p (mode, x))
10171 return 1;
10172 if (legitimate_constant_pool_address_p (x, mode,
10173 reg_ok_strict || lra_in_progress))
10174 return 1;
10175 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
10176 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
10177 return 1;
10180 /* For TImode, if we have TImode in VSX registers, only allow register
10181 indirect addresses. This will allow the values to go in either GPRs
10182 or VSX registers without reloading. The vector types would tend to
10183 go into VSX registers, so we allow REG+REG, while TImode seems
10184 somewhat split, in that some uses are GPR based, and some VSX based. */
10185 /* FIXME: We could loosen this by changing the following to
10186 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
10187 but currently we cannot allow REG+REG addressing for TImode. See
10188 PR72827 for complete details on how this ends up hoodwinking DSE. */
10189 if (mode == TImode && TARGET_VSX_TIMODE)
10190 return 0;
10191 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
10192 if (! reg_ok_strict
10193 && reg_offset_p
10194 && GET_CODE (x) == PLUS
10195 && GET_CODE (XEXP (x, 0)) == REG
10196 && (XEXP (x, 0) == virtual_stack_vars_rtx
10197 || XEXP (x, 0) == arg_pointer_rtx)
10198 && GET_CODE (XEXP (x, 1)) == CONST_INT)
10199 return 1;
10200 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
10201 return 1;
10202 if (!FLOAT128_2REG_P (mode)
10203 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
10204 || TARGET_POWERPC64
10205 || (mode != DFmode && mode != DDmode)
10206 || (TARGET_E500_DOUBLE && mode != DDmode))
10207 && (TARGET_POWERPC64 || mode != DImode)
10208 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
10209 && mode != PTImode
10210 && !avoiding_indexed_address_p (mode)
10211 && legitimate_indexed_address_p (x, reg_ok_strict))
10212 return 1;
10213 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
10214 && mode_supports_pre_modify_p (mode)
10215 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
10216 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
10217 reg_ok_strict, false)
10218 || (!avoiding_indexed_address_p (mode)
10219 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
10220 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
10221 return 1;
10222 if (reg_offset_p && !quad_offset_p
10223 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
10224 return 1;
10225 return 0;
10228 /* Debug version of rs6000_legitimate_address_p. */
10229 static bool
10230 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
10231 bool reg_ok_strict)
10233 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
10234 fprintf (stderr,
10235 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
10236 "strict = %d, reload = %s, code = %s\n",
10237 ret ? "true" : "false",
10238 GET_MODE_NAME (mode),
10239 reg_ok_strict,
10240 (reload_completed
10241 ? "after"
10242 : (reload_in_progress ? "progress" : "before")),
10243 GET_RTX_NAME (GET_CODE (x)));
10244 debug_rtx (x);
10246 return ret;
10249 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
10251 static bool
10252 rs6000_mode_dependent_address_p (const_rtx addr,
10253 addr_space_t as ATTRIBUTE_UNUSED)
10255 return rs6000_mode_dependent_address_ptr (addr);
10258 /* Go to LABEL if ADDR (a legitimate address expression)
10259 has an effect that depends on the machine mode it is used for.
10261 On the RS/6000 this is true of all integral offsets (since AltiVec
10262 and VSX modes don't allow them) and of any pre-increment or decrement.
10264 ??? Except that due to conceptual problems in offsettable_address_p
10265 we can't really report the problems of integral offsets. So leave
10266 this assuming that the adjustable offset must be valid for the
10267 sub-words of a TFmode operand, which is what we had before. */
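/* Concretely (a sketch): with TARGET_POWERPC64, an offset of 32760
   (0x7ff8) is flagged as mode-dependent, since
   32760 + 0x8000 == 0xfff8 >= 0x10000 - 8; stepping through the
   sub-words of a wide mode would push the last word's offset past the
   16-bit displacement range.  */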
10269 static bool
10270 rs6000_mode_dependent_address (const_rtx addr)
10272 switch (GET_CODE (addr))
10274 case PLUS:
10275 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
10276 is considered a legitimate address before reload, so there
10277 are no offset restrictions in that case. Note that this
10278 condition is safe in strict mode because any address involving
10279 virtual_stack_vars_rtx or arg_pointer_rtx would already have
10280 been rejected as illegitimate. */
10281 if (XEXP (addr, 0) != virtual_stack_vars_rtx
10282 && XEXP (addr, 0) != arg_pointer_rtx
10283 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
10285 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
10286 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
10288 break;
10290 case LO_SUM:
10291 /* Anything in the constant pool is sufficiently aligned that
10292 all bytes have the same high part address. */
10293 return !legitimate_constant_pool_address_p (addr, QImode, false);
10295 /* Auto-increment cases are now treated generically in recog.c. */
10296 case PRE_MODIFY:
10297 return TARGET_UPDATE;
10299 /* AND is only allowed in Altivec loads. */
10300 case AND:
10301 return true;
10303 default:
10304 break;
10307 return false;
10310 /* Debug version of rs6000_mode_dependent_address. */
10311 static bool
10312 rs6000_debug_mode_dependent_address (const_rtx addr)
10314 bool ret = rs6000_mode_dependent_address (addr);
10316 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
10317 ret ? "true" : "false");
10318 debug_rtx (addr);
10320 return ret;
10323 /* Implement FIND_BASE_TERM. */
10326 rs6000_find_base_term (rtx op)
10328 rtx base;
10330 base = op;
10331 if (GET_CODE (base) == CONST)
10332 base = XEXP (base, 0);
10333 if (GET_CODE (base) == PLUS)
10334 base = XEXP (base, 0);
10335 if (GET_CODE (base) == UNSPEC)
10336 switch (XINT (base, 1))
10338 case UNSPEC_TOCREL:
10339 case UNSPEC_MACHOPIC_OFFSET:
10340 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
10341 for aliasing purposes. */
10342 return XVECEXP (base, 0, 0);
10345 return op;
10348 /* More elaborate version of recog's offsettable_memref_p predicate
10349 that works around the ??? note of rs6000_mode_dependent_address.
10350 In particular it accepts
10352 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
10354 in 32-bit mode, which the recog predicate rejects. */
10356 static bool
10357 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
10359 bool worst_case;
10361 if (!MEM_P (op))
10362 return false;
10364 /* First mimic offsettable_memref_p. */
10365 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
10366 return true;
10368 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
10369 the latter predicate knows nothing about the mode of the memory
10370 reference and, therefore, assumes that it is the largest supported
10371 mode (TFmode). As a consequence, legitimate offsettable memory
10372 references are rejected. rs6000_legitimate_offset_address_p contains
10373 the correct logic for the PLUS case of rs6000_mode_dependent_address,
10374 at least with a little bit of help here given that we know the
10375 actual registers used. */
10376 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
10377 || GET_MODE_SIZE (reg_mode) == 4);
10378 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
10379 true, worst_case);
10382 /* Determine the reassociation width to be used in reassociate_bb.
10383 This takes into account how many parallel operations we
10384 can actually do of a given type, and also the latency.
10386 int add/sub 6/cycle
10387 mul 2/cycle
10388 vect add/sub/mul 2/cycle
10389 fp add/sub/mul 2/cycle
10390 dfp 1/cycle
10393 static int
10394 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10395 enum machine_mode mode)
10397 switch (rs6000_cpu)
10399 case PROCESSOR_POWER8:
10400 case PROCESSOR_POWER9:
10401 if (DECIMAL_FLOAT_MODE_P (mode))
10402 return 1;
10403 if (VECTOR_MODE_P (mode))
10404 return 4;
10405 if (INTEGRAL_MODE_P (mode))
10406 return opc == MULT_EXPR ? 4 : 6;
10407 if (FLOAT_MODE_P (mode))
10408 return 4;
10409 break;
10410 default:
10411 break;
10413 return 1;
10416 /* Change register usage conditional on target flags. */
10417 static void
10418 rs6000_conditional_register_usage (void)
10420 int i;
10422 if (TARGET_DEBUG_TARGET)
10423 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10425 /* Set MQ register fixed (already call_used) so that it will not be
10426 allocated. */
10427 fixed_regs[64] = 1;
10429 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10430 if (TARGET_64BIT)
10431 fixed_regs[13] = call_used_regs[13]
10432 = call_really_used_regs[13] = 1;
10434 /* Conditionally disable FPRs. */
10435 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
10436 for (i = 32; i < 64; i++)
10437 fixed_regs[i] = call_used_regs[i]
10438 = call_really_used_regs[i] = 1;
10440 /* The TOC register is not killed across calls in a way that is
10441 visible to the compiler. */
10442 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10443 call_really_used_regs[2] = 0;
10445 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10446 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10448 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10449 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10450 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10451 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10453 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10454 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10455 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10456 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10458 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10459 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10460 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10462 if (TARGET_SPE)
10464 global_regs[SPEFSCR_REGNO] = 1;
10465 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
10466 registers in prologues and epilogues. We no longer use r14
10467 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
10468 pool for link-compatibility with older versions of GCC. Once
10469 "old" code has died out, we can return r14 to the allocation
10470 pool. */
10471 fixed_regs[14]
10472 = call_used_regs[14]
10473 = call_really_used_regs[14] = 1;
10476 if (!TARGET_ALTIVEC && !TARGET_VSX)
10478 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10479 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10480 call_really_used_regs[VRSAVE_REGNO] = 1;
10483 if (TARGET_ALTIVEC || TARGET_VSX)
10484 global_regs[VSCR_REGNO] = 1;
10486 if (TARGET_ALTIVEC_ABI)
10488 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10489 call_used_regs[i] = call_really_used_regs[i] = 1;
10491 /* AIX reserves VR20:31 in non-extended ABI mode. */
10492 if (TARGET_XCOFF)
10493 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10494 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10499 /* Output insns to set DEST equal to the constant SOURCE as a series of
10500 lis, ori and shl instructions and return TRUE. */
10502 bool
10503 rs6000_emit_set_const (rtx dest, rtx source)
10505 machine_mode mode = GET_MODE (dest);
10506 rtx temp, set;
10507 rtx_insn *insn;
10508 HOST_WIDE_INT c;
10510 gcc_checking_assert (CONST_INT_P (source));
10511 c = INTVAL (source);
10512 switch (mode)
10514 case QImode:
10515 case HImode:
10516 emit_insn (gen_rtx_SET (dest, source));
10517 return true;
10519 case SImode:
10520 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10522 emit_insn (gen_rtx_SET (copy_rtx (temp),
10523 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10524 emit_insn (gen_rtx_SET (dest,
10525 gen_rtx_IOR (SImode, copy_rtx (temp),
10526 GEN_INT (c & 0xffff))));
10527 break;
10529 case DImode:
10530 if (!TARGET_POWERPC64)
10532 rtx hi, lo;
10534 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10535 DImode);
10536 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10537 DImode);
10538 emit_move_insn (hi, GEN_INT (c >> 32));
10539 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10540 emit_move_insn (lo, GEN_INT (c));
10542 else
10543 rs6000_emit_set_long_const (dest, c);
10544 break;
10546 default:
10547 gcc_unreachable ();
10550 insn = get_last_insn ();
10551 set = single_set (insn);
10552 if (! CONSTANT_P (SET_SRC (set)))
10553 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10555 return true;
10558 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10559 Output insns to set DEST equal to the constant C as a series of
10560 lis, ori and shl instructions. */
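/* For example (a sketch of the general case at the bottom),
   c = 0x123456789abcdef0 is built as

     lis  tmp,0x1234        # ud4 << 16, sign-extended
     ori  tmp,tmp,0x5678    # | ud3
     sldi tmp,tmp,32
     oris tmp,tmp,0x9abc    # | ud2 << 16
     ori  tmp,tmp,0xdef0    # | ud1

   while constants whose upper halfwords are all zeros or all ones take
   the shorter special-case paths first.  */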
10562 static void
10563 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10565 rtx temp;
10566 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10568 ud1 = c & 0xffff;
10569 c = c >> 16;
10570 ud2 = c & 0xffff;
10571 c = c >> 16;
10572 ud3 = c & 0xffff;
10573 c = c >> 16;
10574 ud4 = c & 0xffff;
10576 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10577 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10578 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10580 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10581 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10583 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10585 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10586 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10587 if (ud1 != 0)
10588 emit_move_insn (dest,
10589 gen_rtx_IOR (DImode, copy_rtx (temp),
10590 GEN_INT (ud1)));
10592 else if (ud3 == 0 && ud4 == 0)
10594 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10596 gcc_assert (ud2 & 0x8000);
10597 emit_move_insn (copy_rtx (temp),
10598 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10599 if (ud1 != 0)
10600 emit_move_insn (copy_rtx (temp),
10601 gen_rtx_IOR (DImode, copy_rtx (temp),
10602 GEN_INT (ud1)));
10603 emit_move_insn (dest,
10604 gen_rtx_ZERO_EXTEND (DImode,
10605 gen_lowpart (SImode,
10606 copy_rtx (temp))));
10608 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10609 || (ud4 == 0 && ! (ud3 & 0x8000)))
10611 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10613 emit_move_insn (copy_rtx (temp),
10614 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10615 if (ud2 != 0)
10616 emit_move_insn (copy_rtx (temp),
10617 gen_rtx_IOR (DImode, copy_rtx (temp),
10618 GEN_INT (ud2)));
10619 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10620 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10621 GEN_INT (16)));
10622 if (ud1 != 0)
10623 emit_move_insn (dest,
10624 gen_rtx_IOR (DImode, copy_rtx (temp),
10625 GEN_INT (ud1)));
10627 else
10629 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10631 emit_move_insn (copy_rtx (temp),
10632 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10633 if (ud3 != 0)
10634 emit_move_insn (copy_rtx (temp),
10635 gen_rtx_IOR (DImode, copy_rtx (temp),
10636 GEN_INT (ud3)));
10638 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10639 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10640 GEN_INT (32)));
10641 if (ud2 != 0)
10642 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10643 gen_rtx_IOR (DImode, copy_rtx (temp),
10644 GEN_INT (ud2 << 16)));
10645 if (ud1 != 0)
10646 emit_move_insn (dest,
10647 gen_rtx_IOR (DImode, copy_rtx (temp),
10648 GEN_INT (ud1)));
10652 /* Helper for the following. Get rid of [r+r] memory refs
10653 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10655 static void
10656 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10658 if (reload_in_progress)
10659 return;
10661 if (GET_CODE (operands[0]) == MEM
10662 && GET_CODE (XEXP (operands[0], 0)) != REG
10663 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10664 GET_MODE (operands[0]), false))
10665 operands[0]
10666 = replace_equiv_address (operands[0],
10667 copy_addr_to_reg (XEXP (operands[0], 0)));
10669 if (GET_CODE (operands[1]) == MEM
10670 && GET_CODE (XEXP (operands[1], 0)) != REG
10671 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10672 GET_MODE (operands[1]), false))
10673 operands[1]
10674 = replace_equiv_address (operands[1],
10675 copy_addr_to_reg (XEXP (operands[1], 0)));
10678 /* Generate a vector of constants to permute MODE for a little-endian
10679 storage operation by swapping the two halves of a vector. */
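/* E.g. for V4SImode this yields { 2, 3, 0, 1 }, i.e. a permutation
   that exchanges the two doubleword halves of the vector.  */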
10680 static rtvec
10681 rs6000_const_vec (machine_mode mode)
10683 int i, subparts;
10684 rtvec v;
10686 switch (mode)
10688 case V1TImode:
10689 subparts = 1;
10690 break;
10691 case V2DFmode:
10692 case V2DImode:
10693 subparts = 2;
10694 break;
10695 case V4SFmode:
10696 case V4SImode:
10697 subparts = 4;
10698 break;
10699 case V8HImode:
10700 subparts = 8;
10701 break;
10702 case V16QImode:
10703 subparts = 16;
10704 break;
10705 default:
10706 gcc_unreachable();
10709 v = rtvec_alloc (subparts);
10711 for (i = 0; i < subparts / 2; ++i)
10712 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10713 for (i = subparts / 2; i < subparts; ++i)
10714 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10716 return v;
10719 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
10720 for a VSX load or store operation. */
10722 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
10724 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
10725 128-bit integers if they are allowed in VSX registers. */
10726 if (FLOAT128_VECTOR_P (mode) || mode == TImode || mode == V1TImode)
10727 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
10728 else
10730 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10731 return gen_rtx_VEC_SELECT (mode, source, par);
10735 /* Emit a little-endian load from vector memory location SOURCE to VSX
10736 register DEST in mode MODE. The load is done with two permuting
10737 insns that represent an lxvd2x and an xxpermdi. */
10738 void
10739 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10741 rtx tmp, permute_mem, permute_reg;
10743 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10744 V1TImode). */
10745 if (mode == TImode || mode == V1TImode)
10747 mode = V2DImode;
10748 dest = gen_lowpart (V2DImode, dest);
10749 source = adjust_address (source, V2DImode, 0);
10752 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10753 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
10754 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
10755 emit_insn (gen_rtx_SET (tmp, permute_mem));
10756 emit_insn (gen_rtx_SET (dest, permute_reg));
10759 /* Emit a little-endian store to vector memory location DEST from VSX
10760 register SOURCE in mode MODE. The store is done with two permuting
10761 insns that represent an xxpermdi and an stxvd2x. */
10762 void
10763 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10765 rtx tmp, permute_src, permute_tmp;
10767 /* This should never be called during or after reload, because it does
10768 not re-permute the source register. It is intended only for use
10769 during expand. */
10770 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
10772 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10773 V1TImode). */
10774 if (mode == TImode || mode == V1TImode)
10776 mode = V2DImode;
10777 dest = adjust_address (dest, V2DImode, 0);
10778 source = gen_lowpart (V2DImode, source);
10781 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10782 permute_src = rs6000_gen_le_vsx_permute (source, mode);
10783 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
10784 emit_insn (gen_rtx_SET (tmp, permute_src));
10785 emit_insn (gen_rtx_SET (dest, permute_tmp));
10788 /* Emit a sequence representing a little-endian VSX load or store,
10789 moving data from SOURCE to DEST in mode MODE. This is done
10790 separately from rs6000_emit_move to ensure it is called only
10791 during expand. LE VSX loads and stores introduced later are
10792 handled with a split. The expand-time RTL generation allows
10793 us to optimize away redundant pairs of register-permutes. */
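/* A sketch of the LE sequence for a V4SImode load from [r3]:

     lxvd2x   vs0,0,3         # doubleword-swapped load
     xxpermdi vs0,vs0,vs0,2   # swap the halves back

   When a loaded value is stored straight back, the two xxpermdi's
   cancel each other, which is the optimization that expand-time
   emission makes possible.  */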
10794 void
10795 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10797 gcc_assert (!BYTES_BIG_ENDIAN
10798 && VECTOR_MEM_VSX_P (mode)
10799 && !TARGET_P9_VECTOR
10800 && !gpr_or_gpr_p (dest, source)
10801 && (MEM_P (source) ^ MEM_P (dest)));
10803 if (MEM_P (source))
10805 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10806 rs6000_emit_le_vsx_load (dest, source, mode);
10808 else
10810 if (!REG_P (source))
10811 source = force_reg (mode, source);
10812 rs6000_emit_le_vsx_store (dest, source, mode);
10816 /* Return whether an SFmode or SImode move can be done without converting one
10817 mode to another. This arises when we have:
10819 (SUBREG:SF (REG:SI ...))
10820 (SUBREG:SI (REG:SF ...))
10822 and one of the values is in a floating point/vector register, where SFmode
10823 scalars are stored in DFmode format. */
10825 bool
10826 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10828 if (TARGET_ALLOW_SF_SUBREG)
10829 return true;
10831 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10832 return true;
10834 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10835 return true;
10837 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10838 if (SUBREG_P (dest))
10840 rtx dest_subreg = SUBREG_REG (dest);
10841 rtx src_subreg = SUBREG_REG (src);
10842 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10845 return false;
10849 /* Helper function to change moves with:
10851 (SUBREG:SF (REG:SI)) and
10852 (SUBREG:SI (REG:SF))
10854 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10855 values are stored as DFmode values in the VSX registers. We need to convert
10856 the bits before we can use a direct move or operate on the bits in the
10857 vector register as an integer type.
10859 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
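/* E.g. (a sketch) (set (reg:SI 3) (subreg:SI (reg:SF fpr) 0)) becomes
   a movsi_from_sf insn, which converts the DFmode-format scalar back
   to SFmode bits before the direct move; copying the raw register
   contents would move the wrong bit pattern.  */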
10861 static bool
10862 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10864 if (TARGET_DIRECT_MOVE_64BIT && !reload_in_progress && !reload_completed
10865 && !lra_in_progress
10866 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10867 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10869 rtx inner_source = SUBREG_REG (source);
10870 machine_mode inner_mode = GET_MODE (inner_source);
10872 if (mode == SImode && inner_mode == SFmode)
10874 emit_insn (gen_movsi_from_sf (dest, inner_source));
10875 return true;
10878 if (mode == SFmode && inner_mode == SImode)
10880 emit_insn (gen_movsf_from_si (dest, inner_source));
10881 return true;
10885 return false;
10888 /* Emit a move from SOURCE to DEST in mode MODE. */
10889 void
10890 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10892 rtx operands[2];
10893 operands[0] = dest;
10894 operands[1] = source;
10896 if (TARGET_DEBUG_ADDR)
10898 fprintf (stderr,
10899 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
10900 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10901 GET_MODE_NAME (mode),
10902 reload_in_progress,
10903 reload_completed,
10904 can_create_pseudo_p ());
10905 debug_rtx (dest);
10906 fprintf (stderr, "source:\n");
10907 debug_rtx (source);
10910 /* Sanity check. A CONST_WIDE_INT should only appear for modes wider than a HOST_WIDE_INT. */
10911 if (CONST_WIDE_INT_P (operands[1])
10912 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10914 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10915 gcc_unreachable ();
10918 /* See if we need to special case SImode/SFmode SUBREG moves. */
10919 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10920 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10921 return;
10923 /* Check if GCC is setting up a block move that will end up using FP
10924 registers as temporaries. We must make sure this is acceptable. */
10925 if (GET_CODE (operands[0]) == MEM
10926 && GET_CODE (operands[1]) == MEM
10927 && mode == DImode
10928 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10929 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10930 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10931 ? 32 : MEM_ALIGN (operands[0])))
10932 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10933 ? 32
10934 : MEM_ALIGN (operands[1]))))
10935 && ! MEM_VOLATILE_P (operands [0])
10936 && ! MEM_VOLATILE_P (operands [1]))
10938 emit_move_insn (adjust_address (operands[0], SImode, 0),
10939 adjust_address (operands[1], SImode, 0));
10940 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10941 adjust_address (copy_rtx (operands[1]), SImode, 4));
10942 return;
10945 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10946 && !gpc_reg_operand (operands[1], mode))
10947 operands[1] = force_reg (mode, operands[1]);
10949 /* Recognize the case where operand[1] is a reference to thread-local
10950 data and load its address to a register. */
10951 if (tls_referenced_p (operands[1]))
10953 enum tls_model model;
10954 rtx tmp = operands[1];
10955 rtx addend = NULL;
10957 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10959 addend = XEXP (XEXP (tmp, 0), 1);
10960 tmp = XEXP (XEXP (tmp, 0), 0);
10963 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10964 model = SYMBOL_REF_TLS_MODEL (tmp);
10965 gcc_assert (model != 0);
10967 tmp = rs6000_legitimize_tls_address (tmp, model);
10968 if (addend)
10970 tmp = gen_rtx_PLUS (mode, tmp, addend);
10971 tmp = force_operand (tmp, operands[0]);
10973 operands[1] = tmp;
10976 /* Handle the case where reload calls us with an invalid address. */
10977 if (reload_in_progress && mode == Pmode
10978 && (! general_operand (operands[1], mode)
10979 || ! nonimmediate_operand (operands[0], mode)))
10980 goto emit_set;
10982 /* 128-bit constant floating-point values on Darwin should really be loaded
10983 as two parts. However, this premature splitting is a problem when DFmode
10984 values can go into Altivec registers. */
10985 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10986 && GET_CODE (operands[1]) == CONST_DOUBLE)
10988 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10989 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10990 DFmode);
10991 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10992 GET_MODE_SIZE (DFmode)),
10993 simplify_gen_subreg (DFmode, operands[1], mode,
10994 GET_MODE_SIZE (DFmode)),
10995 DFmode);
10996 return;
10999 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
11000 cfun->machine->sdmode_stack_slot =
11001 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
11004 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
11005 p1:SD) if p1 is not of floating point class and p0 is spilled as
11006 we can have no analogous movsd_store for this. */
11007 if (lra_in_progress && mode == DDmode
11008 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
11009 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
11010 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
11011 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
11013 enum reg_class cl;
11014 int regno = REGNO (SUBREG_REG (operands[1]));
11016 if (regno >= FIRST_PSEUDO_REGISTER)
11018 cl = reg_preferred_class (regno);
11019 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
11021 if (regno >= 0 && ! FP_REGNO_P (regno))
11023 mode = SDmode;
11024 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
11025 operands[1] = SUBREG_REG (operands[1]);
11028 if (lra_in_progress
11029 && mode == SDmode
11030 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
11031 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
11032 && (REG_P (operands[1])
11033 || (GET_CODE (operands[1]) == SUBREG
11034 && REG_P (SUBREG_REG (operands[1])))))
11036 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
11037 ? SUBREG_REG (operands[1]) : operands[1]);
11038 enum reg_class cl;
11040 if (regno >= FIRST_PSEUDO_REGISTER)
11042 cl = reg_preferred_class (regno);
11043 gcc_assert (cl != NO_REGS);
11044 regno = ira_class_hard_regs[cl][0];
11046 if (FP_REGNO_P (regno))
11048 if (GET_MODE (operands[0]) != DDmode)
11049 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
11050 emit_insn (gen_movsd_store (operands[0], operands[1]));
11052 else if (INT_REGNO_P (regno))
11053 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
11054 else
11055 gcc_unreachable ();
11056 return;
11058 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
11059 p:DD)) if p0 is not of floating point class and p1 is spilled as
11060 we can have no analogous movsd_load for this. */
11061 if (lra_in_progress && mode == DDmode
11062 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
11063 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
11064 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
11065 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
11067 enum reg_class cl;
11068 int regno = REGNO (SUBREG_REG (operands[0]));
11070 if (regno >= FIRST_PSEUDO_REGISTER)
11072 cl = reg_preferred_class (regno);
11073 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
11075 if (regno >= 0 && ! FP_REGNO_P (regno))
11077 mode = SDmode;
11078 operands[0] = SUBREG_REG (operands[0]);
11079 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
11082 if (lra_in_progress
11083 && mode == SDmode
11084 && (REG_P (operands[0])
11085 || (GET_CODE (operands[0]) == SUBREG
11086 && REG_P (SUBREG_REG (operands[0]))))
11087 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
11088 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
11090 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
11091 ? SUBREG_REG (operands[0]) : operands[0]);
11092 enum reg_class cl;
11094 if (regno >= FIRST_PSEUDO_REGISTER)
11096 cl = reg_preferred_class (regno);
11097 gcc_assert (cl != NO_REGS);
11098 regno = ira_class_hard_regs[cl][0];
11100 if (FP_REGNO_P (regno))
11102 if (GET_MODE (operands[1]) != DDmode)
11103 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
11104 emit_insn (gen_movsd_load (operands[0], operands[1]));
11106 else if (INT_REGNO_P (regno))
11107 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
11108 else
11109 gcc_unreachable ();
11110 return;
11113 if (reload_in_progress
11114 && mode == SDmode
11115 && cfun->machine->sdmode_stack_slot != NULL_RTX
11116 && MEM_P (operands[0])
11117 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
11118 && REG_P (operands[1]))
11120 if (FP_REGNO_P (REGNO (operands[1])))
11122 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
11123 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11124 emit_insn (gen_movsd_store (mem, operands[1]));
11126 else if (INT_REGNO_P (REGNO (operands[1])))
11128 rtx mem = operands[0];
11129 if (BYTES_BIG_ENDIAN)
11130 mem = adjust_address_nv (mem, mode, 4);
11131 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11132 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
11134 else
11135 gcc_unreachable ();
11136 return;
11138 if (reload_in_progress
11139 && mode == SDmode
11140 && REG_P (operands[0])
11141 && MEM_P (operands[1])
11142 && cfun->machine->sdmode_stack_slot != NULL_RTX
11143 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
11145 if (FP_REGNO_P (REGNO (operands[0])))
11147 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
11148 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11149 emit_insn (gen_movsd_load (operands[0], mem));
11151 else if (INT_REGNO_P (REGNO (operands[0])))
11153 rtx mem = operands[1];
11154 if (BYTES_BIG_ENDIAN)
11155 mem = adjust_address_nv (mem, mode, 4);
11156 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11157 emit_insn (gen_movsd_hardfloat (operands[0], mem));
11159 else
11160 gcc_unreachable ();
11161 return;
11164 /* FIXME: In the long term, this switch statement should go away
11165 and be replaced by a sequence of tests based on things like
11166 mode == Pmode. */
11167 switch (mode)
11169 case HImode:
11170 case QImode:
11171 if (CONSTANT_P (operands[1])
11172 && GET_CODE (operands[1]) != CONST_INT)
11173 operands[1] = force_const_mem (mode, operands[1]);
11174 break;
11176 case TFmode:
11177 case TDmode:
11178 case IFmode:
11179 case KFmode:
11180 if (FLOAT128_2REG_P (mode))
11181 rs6000_eliminate_indexed_memrefs (operands);
11182 /* fall through */
11184 case DFmode:
11185 case DDmode:
11186 case SFmode:
11187 case SDmode:
11188 if (CONSTANT_P (operands[1])
11189 && ! easy_fp_constant (operands[1], mode))
11190 operands[1] = force_const_mem (mode, operands[1]);
11191 break;
11193 case V16QImode:
11194 case V8HImode:
11195 case V4SFmode:
11196 case V4SImode:
11197 case V4HImode:
11198 case V2SFmode:
11199 case V2SImode:
11200 case V1DImode:
11201 case V2DFmode:
11202 case V2DImode:
11203 case V1TImode:
11204 if (CONSTANT_P (operands[1])
11205 && !easy_vector_constant (operands[1], mode))
11206 operands[1] = force_const_mem (mode, operands[1]);
11207 break;
11209 case SImode:
11210 case DImode:
11211 /* Use default pattern for address of ELF small data. */
11212 if (TARGET_ELF
11213 && mode == Pmode
11214 && DEFAULT_ABI == ABI_V4
11215 && (GET_CODE (operands[1]) == SYMBOL_REF
11216 || GET_CODE (operands[1]) == CONST)
11217 && small_data_operand (operands[1], mode))
11219 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11220 return;
11223 if (DEFAULT_ABI == ABI_V4
11224 && mode == Pmode && mode == SImode
11225 && flag_pic == 1 && got_operand (operands[1], mode))
11227 emit_insn (gen_movsi_got (operands[0], operands[1]));
11228 return;
11231 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
11232 && TARGET_NO_TOC
11233 && ! flag_pic
11234 && mode == Pmode
11235 && CONSTANT_P (operands[1])
11236 && GET_CODE (operands[1]) != HIGH
11237 && GET_CODE (operands[1]) != CONST_INT)
11239 rtx target = (!can_create_pseudo_p ()
11240 ? operands[0]
11241 : gen_reg_rtx (mode));
11243 /* If this is a function address on -mcall-aixdesc,
11244 convert it to the address of the descriptor. */
11245 if (DEFAULT_ABI == ABI_AIX
11246 && GET_CODE (operands[1]) == SYMBOL_REF
11247 && XSTR (operands[1], 0)[0] == '.')
11249 const char *name = XSTR (operands[1], 0);
11250 rtx new_ref;
11251 while (*name == '.')
11252 name++;
11253 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
11254 CONSTANT_POOL_ADDRESS_P (new_ref)
11255 = CONSTANT_POOL_ADDRESS_P (operands[1]);
11256 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
11257 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
11258 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
11259 operands[1] = new_ref;
11262 if (DEFAULT_ABI == ABI_DARWIN)
11264 #if TARGET_MACHO
11265 if (MACHO_DYNAMIC_NO_PIC_P)
11267 /* Take care of any required data indirection. */
11268 operands[1] = rs6000_machopic_legitimize_pic_address (
11269 operands[1], mode, operands[0]);
11270 if (operands[0] != operands[1])
11271 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11272 return;
11274 #endif
11275 emit_insn (gen_macho_high (target, operands[1]));
11276 emit_insn (gen_macho_low (operands[0], target, operands[1]));
11277 return;
11280 emit_insn (gen_elf_high (target, operands[1]));
11281 emit_insn (gen_elf_low (operands[0], target, operands[1]));
11282 return;
11285 /* If this is a SYMBOL_REF that refers to a constant pool entry,
11286 and we have put it in the TOC, we just need to make a TOC-relative
11287 reference to it. */
11288 if (TARGET_TOC
11289 && GET_CODE (operands[1]) == SYMBOL_REF
11290 && use_toc_relative_ref (operands[1], mode))
11291 operands[1] = create_TOC_reference (operands[1], operands[0]);
11292 else if (mode == Pmode
11293 && CONSTANT_P (operands[1])
11294 && GET_CODE (operands[1]) != HIGH
11295 && ((GET_CODE (operands[1]) != CONST_INT
11296 && ! easy_fp_constant (operands[1], mode))
11297 || (GET_CODE (operands[1]) == CONST_INT
11298 && (num_insns_constant (operands[1], mode)
11299 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
11300 || (GET_CODE (operands[0]) == REG
11301 && FP_REGNO_P (REGNO (operands[0]))))
11302 && !toc_relative_expr_p (operands[1], false)
11303 && (TARGET_CMODEL == CMODEL_SMALL
11304 || can_create_pseudo_p ()
11305 || (REG_P (operands[0])
11306 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
11309 #if TARGET_MACHO
11310 /* Darwin uses a special PIC legitimizer. */
11311 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
11313 operands[1] =
11314 rs6000_machopic_legitimize_pic_address (operands[1], mode,
11315 operands[0]);
11316 if (operands[0] != operands[1])
11317 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11318 return;
11320 #endif
11322 /* If we are to limit the number of things we put in the TOC and
11323 this is a symbol plus a constant we can add in one insn,
11324 just put the symbol in the TOC and add the constant. Don't do
11325 this if reload is in progress. */
11326 if (GET_CODE (operands[1]) == CONST
11327 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
11328 && GET_CODE (XEXP (operands[1], 0)) == PLUS
11329 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
11330 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
11331 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
11332 && ! side_effects_p (operands[0]))
11334 rtx sym =
11335 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
11336 rtx other = XEXP (XEXP (operands[1], 0), 1);
11338 sym = force_reg (mode, sym);
11339 emit_insn (gen_add3_insn (operands[0], sym, other));
11340 return;
11343 operands[1] = force_const_mem (mode, operands[1]);
11345 if (TARGET_TOC
11346 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11347 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
11349 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
11350 operands[0]);
11351 operands[1] = gen_const_mem (mode, tocref);
11352 set_mem_alias_set (operands[1], get_TOC_alias_set ());
11355 break;
11357 case TImode:
11358 if (!VECTOR_MEM_VSX_P (TImode))
11359 rs6000_eliminate_indexed_memrefs (operands);
11360 break;
11362 case PTImode:
11363 rs6000_eliminate_indexed_memrefs (operands);
11364 break;
11366 default:
11367 fatal_insn ("bad move", gen_rtx_SET (dest, source));
11370 /* Above, we may have called force_const_mem which may have returned
11371 an invalid address. If we can, fix this up; otherwise, reload will
11372 have to deal with it. */
11373 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
11374 operands[1] = validize_mem (operands[1]);
11376 emit_set:
11377 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11380 /* Return true if a structure, union or array containing FIELD should be
11381 accessed using `BLKMODE'.
11383 For the SPE, simd types are V2SI, and gcc can be tempted to put the
11384 entire thing in a DI and use subregs to access the internals.
11385 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
11386 back-end. Because a single GPR can hold a V2SI, but not a DI, the
11387 best thing to do is set structs to BLKmode and avoid Severe Tire
11388 Damage.
11390 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
11391 fit into one GPR, whereas DI still needs two. */
11393 static bool
11394 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
11396 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
11397 || (TARGET_E500_DOUBLE && mode == DFmode));
11400 /* Nonzero if we can use a floating-point register to pass this arg. */
11401 #define USE_FP_FOR_ARG_P(CUM,MODE) \
11402 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
11403 && (CUM)->fregno <= FP_ARG_MAX_REG \
11404 && TARGET_HARD_FLOAT && TARGET_FPRS)
11406 /* Nonzero if we can use an AltiVec register to pass this arg. */
11407 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
11408 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
11409 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
11410 && TARGET_ALTIVEC_ABI \
11411 && (NAMED))
11413 /* Walk down the type tree of TYPE counting consecutive base elements.
11414 If *MODEP is VOIDmode, then set it to the first valid floating point
11415 or vector type. If a non-floating point or vector type is found, or
11416 if a floating point or vector type that doesn't match a non-VOIDmode
11417 *MODEP is found, then return -1, otherwise return the count in the
11418 sub-tree. */
11420 static int
11421 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
11423 machine_mode mode;
11424 HOST_WIDE_INT size;
11426 switch (TREE_CODE (type))
11428 case REAL_TYPE:
11429 mode = TYPE_MODE (type);
11430 if (!SCALAR_FLOAT_MODE_P (mode))
11431 return -1;
11433 if (*modep == VOIDmode)
11434 *modep = mode;
11436 if (*modep == mode)
11437 return 1;
11439 break;
11441 case COMPLEX_TYPE:
11442 mode = TYPE_MODE (TREE_TYPE (type));
11443 if (!SCALAR_FLOAT_MODE_P (mode))
11444 return -1;
11446 if (*modep == VOIDmode)
11447 *modep = mode;
11449 if (*modep == mode)
11450 return 2;
11452 break;
11454 case VECTOR_TYPE:
11455 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
11456 return -1;
11458 /* Use V4SImode as representative of all 128-bit vector types. */
11459 size = int_size_in_bytes (type);
11460 switch (size)
11462 case 16:
11463 mode = V4SImode;
11464 break;
11465 default:
11466 return -1;
11469 if (*modep == VOIDmode)
11470 *modep = mode;
11472 /* Vector modes are considered to be opaque: two vectors are
11473 equivalent for the purposes of being homogeneous aggregates
11474 if they are the same size. */
11475 if (*modep == mode)
11476 return 1;
11478 break;
11480 case ARRAY_TYPE:
11482 int count;
11483 tree index = TYPE_DOMAIN (type);
11485 /* Can't handle incomplete types nor sizes that are not
11486 fixed. */
11487 if (!COMPLETE_TYPE_P (type)
11488 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11489 return -1;
11491 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11492 if (count == -1
11493 || !index
11494 || !TYPE_MAX_VALUE (index)
11495 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11496 || !TYPE_MIN_VALUE (index)
11497 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11498 || count < 0)
11499 return -1;
11501 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11502 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11504 /* There must be no padding. */
11505 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11506 return -1;
11508 return count;
11511 case RECORD_TYPE:
11513 int count = 0;
11514 int sub_count;
11515 tree field;
11517 /* Can't handle incomplete types nor sizes that are not
11518 fixed. */
11519 if (!COMPLETE_TYPE_P (type)
11520 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11521 return -1;
11523 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11525 if (TREE_CODE (field) != FIELD_DECL)
11526 continue;
11528 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11529 if (sub_count < 0)
11530 return -1;
11531 count += sub_count;
11534 /* There must be no padding. */
11535 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11536 return -1;
11538 return count;
11541 case UNION_TYPE:
11542 case QUAL_UNION_TYPE:
11544 /* These aren't very interesting except in a degenerate case. */
11545 int count = 0;
11546 int sub_count;
11547 tree field;
11549 /* Can't handle incomplete types nor sizes that are not
11550 fixed. */
11551 if (!COMPLETE_TYPE_P (type)
11552 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11553 return -1;
11555 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11557 if (TREE_CODE (field) != FIELD_DECL)
11558 continue;
11560 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11561 if (sub_count < 0)
11562 return -1;
11563 count = count > sub_count ? count : sub_count;
11566 /* There must be no padding. */
11567 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11568 return -1;
11570 return count;
11573 default:
11574 break;
11577 return -1;
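
/* Worked illustration of the walk above (not from the source): for
   struct { _Complex double c; double d[3]; } the COMPLEX_TYPE leaf
   contributes 2, and the ARRAY_TYPE leaf contributes 1 * (1 + 2 - 0) = 3,
   all against *modep == DFmode, so the RECORD_TYPE case returns 5 --
   provided TYPE_SIZE is exactly 5 * 64 bits, since any padding makes
   the wi::ne_p check fail with -1.  */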
11580 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11581 float or vector aggregate that shall be passed in FP/vector registers
11582 according to the ELFv2 ABI, store the homogeneous element mode in
11583 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11585 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11587 static bool
11588 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11589 machine_mode *elt_mode,
11590 int *n_elts)
11592 /* Note that we do not accept complex types at the top level as
11593 homogeneous aggregates; these types are handled via the
11594 targetm.calls.split_complex_arg mechanism. Complex types
11595 can be elements of homogeneous aggregates, however. */
11596 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11598 machine_mode field_mode = VOIDmode;
11599 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11601 if (field_count > 0)
11603 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11604 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11606 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11607 up to AGGR_ARG_NUM_REG registers. */
11608 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11610 if (elt_mode)
11611 *elt_mode = field_mode;
11612 if (n_elts)
11613 *n_elts = field_count;
11614 return true;
11619 if (elt_mode)
11620 *elt_mode = mode;
11621 if (n_elts)
11622 *n_elts = 1;
11623 return false;
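
/* Hypothetical examples of the ELFv2 test above, assuming the usual
   AGGR_ARG_NUM_REG budget of registers for homogeneous aggregates:

     struct { double x, y; }        -> true, *elt_mode DFmode, *n_elts 2
     struct { vector int a, b; }    -> true, *elt_mode V4SImode, *n_elts 2
     struct { double d; int i; }    -> false (mixed element types)
     struct { double d[16]; }       -> false (needs too many registers)  */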
11626 /* Return a nonzero value to say to return the function value in
11627 memory, just as large structures are always returned. TYPE will be
11628 the data type of the value, and FNTYPE will be the type of the
11629 function doing the returning, or @code{NULL} for libcalls.
11631 The AIX ABI for the RS/6000 specifies that all structures are
11632 returned in memory. The Darwin ABI does the same.
11634 For the Darwin 64 Bit ABI, a function result can be returned in
11635 registers or in memory, depending on the size of the return data
11636 type. If it is returned in registers, the value occupies the same
11637 registers as it would if it were the first and only function
11638 argument. Otherwise, the function places its result in memory at
11639 the location pointed to by GPR3.
11641 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11642 but a draft put them in memory, and GCC used to implement the draft
11643 instead of the final standard. Therefore, aix_struct_return
11644 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11645 compatibility can change DRAFT_V4_STRUCT_RET to override the
11646 default, and -m switches get the final word. See
11647 rs6000_option_override_internal for more details.
11649 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11650 long double support is enabled. These values are returned in memory.
11652 int_size_in_bytes returns -1 for variable size objects, which go in
11653 memory always. The cast to unsigned makes -1 > 8. */
11655 static bool
11656 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11658 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11659 if (TARGET_MACHO
11660 && rs6000_darwin64_abi
11661 && TREE_CODE (type) == RECORD_TYPE
11662 && int_size_in_bytes (type) > 0)
11664 CUMULATIVE_ARGS valcum;
11665 rtx valret;
11667 valcum.words = 0;
11668 valcum.fregno = FP_ARG_MIN_REG;
11669 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11670 /* Do a trial code generation as if this were going to be passed
11671 as an argument; if any part goes in memory, we return NULL. */
11672 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11673 if (valret)
11674 return false;
11675 /* Otherwise fall through to more conventional ABI rules. */
11678 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
11679 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11680 NULL, NULL))
11681 return false;
11683 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11684 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11685 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11686 return false;
11688 if (AGGREGATE_TYPE_P (type)
11689 && (aix_struct_return
11690 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11691 return true;
11693 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11694 modes only exist for GCC vector types if -maltivec. */
11695 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11696 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11697 return false;
11699 /* Return synthetic vectors in memory. */
11700 if (TREE_CODE (type) == VECTOR_TYPE
11701 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11703 static bool warned_for_return_big_vectors = false;
11704 if (!warned_for_return_big_vectors)
11706 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11707 "non-standard ABI extension with no compatibility guarantee");
11708 warned_for_return_big_vectors = true;
11710 return true;
11713 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11714 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11715 return true;
11717 return false;
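
/* Hypothetical examples of the rules above under ELFv2 on 64-bit:

     struct { double a, b; }    -> registers (homogeneous FP aggregate)
     struct { char c[16]; }     -> registers (aggregate <= 16 bytes)
     struct { char c[24]; }     -> memory
     a 32-byte synthetic GCC vector -> memory, with a one-time
       -Wpsabi warning.  */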
11720 /* Specify whether values returned in registers should be at the most
11721 significant end of a register. We want aggregates returned by
11722 value to match the way aggregates are passed to functions. */
11724 static bool
11725 rs6000_return_in_msb (const_tree valtype)
11727 return (DEFAULT_ABI == ABI_ELFv2
11728 && BYTES_BIG_ENDIAN
11729 && AGGREGATE_TYPE_P (valtype)
11730 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
11733 #ifdef HAVE_AS_GNU_ATTRIBUTE
11734 /* Return TRUE if a call to function FNDECL may be one that
11735 potentially affects the function calling ABI of the object file. */
11737 static bool
11738 call_ABI_of_interest (tree fndecl)
11740 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11742 struct cgraph_node *c_node;
11744 /* Libcalls are always interesting. */
11745 if (fndecl == NULL_TREE)
11746 return true;
11748 /* Any call to an external function is interesting. */
11749 if (DECL_EXTERNAL (fndecl))
11750 return true;
11752 /* Interesting functions that we are emitting in this object file. */
11753 c_node = cgraph_node::get (fndecl);
11754 c_node = c_node->ultimate_alias_target ();
11755 return !c_node->only_called_directly_p ();
11757 return false;
11759 #endif
11761 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11762 for a call to a function whose data type is FNTYPE.
11763 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11765 For incoming args we set the number of arguments in the prototype large
11766 so we never return a PARALLEL. */
11768 void
11769 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11770 rtx libname ATTRIBUTE_UNUSED, int incoming,
11771 int libcall, int n_named_args,
11772 tree fndecl ATTRIBUTE_UNUSED,
11773 machine_mode return_mode ATTRIBUTE_UNUSED)
11775 static CUMULATIVE_ARGS zero_cumulative;
11777 *cum = zero_cumulative;
11778 cum->words = 0;
11779 cum->fregno = FP_ARG_MIN_REG;
11780 cum->vregno = ALTIVEC_ARG_MIN_REG;
11781 cum->prototype = (fntype && prototype_p (fntype));
11782 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11783 ? CALL_LIBCALL : CALL_NORMAL);
11784 cum->sysv_gregno = GP_ARG_MIN_REG;
11785 cum->stdarg = stdarg_p (fntype);
11786 cum->libcall = libcall;
11788 cum->nargs_prototype = 0;
11789 if (incoming || cum->prototype)
11790 cum->nargs_prototype = n_named_args;
11792 /* Check for a longcall attribute. */
11793 if ((!fntype && rs6000_default_long_calls)
11794 || (fntype
11795 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11796 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11797 cum->call_cookie |= CALL_LONG;
11799 if (TARGET_DEBUG_ARG)
11801 fprintf (stderr, "\ninit_cumulative_args:");
11802 if (fntype)
11804 tree ret_type = TREE_TYPE (fntype);
11805 fprintf (stderr, " ret code = %s,",
11806 get_tree_code_name (TREE_CODE (ret_type)));
11809 if (cum->call_cookie & CALL_LONG)
11810 fprintf (stderr, " longcall,");
11812 fprintf (stderr, " proto = %d, nargs = %d\n",
11813 cum->prototype, cum->nargs_prototype);
11816 #ifdef HAVE_AS_GNU_ATTRIBUTE
11817 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11819 cum->escapes = call_ABI_of_interest (fndecl);
11820 if (cum->escapes)
11822 tree return_type;
11824 if (fntype)
11826 return_type = TREE_TYPE (fntype);
11827 return_mode = TYPE_MODE (return_type);
11829 else
11830 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11832 if (return_type != NULL)
11834 if (TREE_CODE (return_type) == RECORD_TYPE
11835 && TYPE_TRANSPARENT_AGGR (return_type))
11837 return_type = TREE_TYPE (first_field (return_type));
11838 return_mode = TYPE_MODE (return_type);
11840 if (AGGREGATE_TYPE_P (return_type)
11841 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11842 <= 8))
11843 rs6000_returns_struct = true;
11845 if (SCALAR_FLOAT_MODE_P (return_mode))
11847 rs6000_passes_float = true;
11848 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11849 && (FLOAT128_IBM_P (return_mode)
11850 || FLOAT128_IEEE_P (return_mode)
11851 || (return_type != NULL
11852 && (TYPE_MAIN_VARIANT (return_type)
11853 == long_double_type_node))))
11854 rs6000_passes_long_double = true;
11856 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11857 || SPE_VECTOR_MODE (return_mode))
11858 rs6000_passes_vector = true;
11861 #endif
11863 if (fntype
11864 && !TARGET_ALTIVEC
11865 && TARGET_ALTIVEC_ABI
11866 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11868 error ("cannot return value in vector register because"
11869 " altivec instructions are disabled, use -maltivec"
11870 " to enable them");
11874 /* The mode the ABI uses for a word. This is not the same as word_mode
11875 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11877 static machine_mode
11878 rs6000_abi_word_mode (void)
11880 return TARGET_32BIT ? SImode : DImode;
11883 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11884 static char *
11885 rs6000_offload_options (void)
11887 if (TARGET_64BIT)
11888 return xstrdup ("-foffload-abi=lp64");
11889 else
11890 return xstrdup ("-foffload-abi=ilp32");
11893 /* On rs6000, function arguments are promoted, as are function return
11894 values. */
11896 static machine_mode
11897 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11898 machine_mode mode,
11899 int *punsignedp ATTRIBUTE_UNUSED,
11900 const_tree, int)
11902 PROMOTE_MODE (mode, *punsignedp, type);
11904 return mode;
11907 /* Return true if TYPE must be passed on the stack and not in registers. */
11909 static bool
11910 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11912 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11913 return must_pass_in_stack_var_size (mode, type);
11914 else
11915 return must_pass_in_stack_var_size_or_pad (mode, type);
11918 static inline bool
11919 is_complex_IBM_long_double (machine_mode mode)
11921 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11924 /* Whether ABI_V4 passes MODE args to a function in floating point
11925 registers. */
11927 static bool
11928 abi_v4_pass_in_fpr (machine_mode mode)
11930 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
11931 return false;
11932 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11933 return true;
11934 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11935 return true;
11936 /* ABI_V4 passes complex IBM long double in 8 gprs.
11937 Stupid, but we can't change the ABI now. */
11938 if (is_complex_IBM_long_double (mode))
11939 return false;
11940 if (FLOAT128_2REG_P (mode))
11941 return true;
11942 if (DECIMAL_FLOAT_MODE_P (mode))
11943 return true;
11944 return false;
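
/* Sketch of the predicate above for common modes, assuming hard float
   with both single- and double-precision FPRs enabled:

     SFmode, DFmode                    -> FPR
     FLOAT128_2REG_P (IBM long double) -> FPR pair
     SDmode / DDmode / TDmode          -> FPR (decimal float)
     ICmode, and TCmode with IBM long double
                                       -> 8 GPRs; complex IBM long
                                          double stays out of FPRs
                                          for ABI compatibility.  */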
11947 /* If defined, a C expression which determines whether, and in which
11948 direction, to pad out an argument with extra space. The value
11949 should be of type `enum direction': either `upward' to pad above
11950 the argument, `downward' to pad below, or `none' to inhibit
11951 padding.
11953 For the AIX ABI structs are always stored left shifted in their
11954 argument slot. */
11956 enum direction
11957 function_arg_padding (machine_mode mode, const_tree type)
11959 #ifndef AGGREGATE_PADDING_FIXED
11960 #define AGGREGATE_PADDING_FIXED 0
11961 #endif
11962 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11963 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11964 #endif
11966 if (!AGGREGATE_PADDING_FIXED)
11968 /* GCC used to pass structures of the same size as integer types as
11969 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11970 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11971 passed padded downward, except that -mstrict-align further
11972 muddied the water in that multi-component structures of 2 and 4
11973 bytes in size were passed padded upward.
11975 The following arranges for best compatibility with previous
11976 versions of gcc, but removes the -mstrict-align dependency. */
11977 if (BYTES_BIG_ENDIAN)
11979 HOST_WIDE_INT size = 0;
11981 if (mode == BLKmode)
11983 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11984 size = int_size_in_bytes (type);
11986 else
11987 size = GET_MODE_SIZE (mode);
11989 if (size == 1 || size == 2 || size == 4)
11990 return downward;
11992 return upward;
11995 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11997 if (type != 0 && AGGREGATE_TYPE_P (type))
11998 return upward;
12001 /* Fall back to the default. */
12002 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
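
/* Illustration (hypothetical): with !AGGREGATE_PADDING_FIXED on a
   big-endian target, a 2-byte struct pads downward (right-justified in
   its slot, matching how old GCC passed it as an integer), while a
   3-byte or 16-byte aggregate pads upward in the usual AIX fashion.  */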
12005 /* If defined, a C expression that gives the alignment boundary, in bits,
12006 of an argument with the specified mode and type. If it is not defined,
12007 PARM_BOUNDARY is used for all arguments.
12009 V.4 wants long longs and doubles to be double word aligned. Just
12010 testing the mode size is a boneheaded way to do this as it means
12011 that other types such as complex int are also double word aligned.
12012 However, we're stuck with this because changing the ABI might break
12013 existing library interfaces.
12015 Doubleword align SPE vectors.
12016 Quadword align Altivec/VSX vectors.
12017 Quadword align large synthetic vector types. */
12019 static unsigned int
12020 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
12022 machine_mode elt_mode;
12023 int n_elts;
12025 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12027 if (DEFAULT_ABI == ABI_V4
12028 && (GET_MODE_SIZE (mode) == 8
12029 || (TARGET_HARD_FLOAT
12030 && TARGET_FPRS
12031 && !is_complex_IBM_long_double (mode)
12032 && FLOAT128_2REG_P (mode))))
12033 return 64;
12034 else if (FLOAT128_VECTOR_P (mode))
12035 return 128;
12036 else if (SPE_VECTOR_MODE (mode)
12037 || (type && TREE_CODE (type) == VECTOR_TYPE
12038 && int_size_in_bytes (type) >= 8
12039 && int_size_in_bytes (type) < 16))
12040 return 64;
12041 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
12042 || (type && TREE_CODE (type) == VECTOR_TYPE
12043 && int_size_in_bytes (type) >= 16))
12044 return 128;
12046 /* Aggregate types that need > 8 byte alignment are quadword-aligned
12047 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
12048 -mcompat-align-parm is used. */
12049 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
12050 || DEFAULT_ABI == ABI_ELFv2)
12051 && type && TYPE_ALIGN (type) > 64)
12053 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
12054 or homogeneous float/vector aggregates here. We already handled
12055 vector aggregates above, but still need to check for float here. */
12056 bool aggregate_p = (AGGREGATE_TYPE_P (type)
12057 && !SCALAR_FLOAT_MODE_P (elt_mode));
12059 /* We used to check for BLKmode instead of the above aggregate type
12060 check. Warn when this results in any difference to the ABI. */
12061 if (aggregate_p != (mode == BLKmode))
12063 static bool warned;
12064 if (!warned && warn_psabi)
12066 warned = true;
12067 inform (input_location,
12068 "the ABI of passing aggregates with %d-byte alignment"
12069 " has changed in GCC 5",
12070 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
12074 if (aggregate_p)
12075 return 128;
12078 /* Similar for the Darwin64 ABI. Note that for historical reasons we
12079 implement the "aggregate type" check as a BLKmode check here; this
12080 means certain aggregate types are in fact not aligned. */
12081 if (TARGET_MACHO && rs6000_darwin64_abi
12082 && mode == BLKmode
12083 && type && TYPE_ALIGN (type) > 64)
12084 return 128;
12086 return PARM_BOUNDARY;
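
/* Hypothetical results of the boundary computation above:

     DFmode under ABI_V4                   -> 64
     V4SImode, or any Altivec/VSX vector   -> 128
     IEEE 128-bit float (KFmode)           -> 128
     ELFv2 struct with 32-byte alignment
       and no float/vector elements        -> 128, plus a one-time
                                              -Wpsabi note if the old
                                              BLKmode rule would have
                                              chosen differently.  */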
12089 /* The offset in words to the start of the parameter save area. */
12091 static unsigned int
12092 rs6000_parm_offset (void)
12094 return (DEFAULT_ABI == ABI_V4 ? 2
12095 : DEFAULT_ABI == ABI_ELFv2 ? 4
12096 : 6);
12099 /* For a function parm of MODE and TYPE, return the starting word in
12100 the parameter area. NWORDS of the parameter area are already used. */
12102 static unsigned int
12103 rs6000_parm_start (machine_mode mode, const_tree type,
12104 unsigned int nwords)
12106 unsigned int align;
12108 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
12109 return nwords + (-(rs6000_parm_offset () + nwords) & align);
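
/* Worked example (illustration only): for a quadword-aligned vector
   parameter under ELFv2 (rs6000_parm_offset () == 4, boundary 128,
   PARM_BOUNDARY 64) we get align == 128/64 - 1 == 1, so with
   nwords == 3 the start word is 3 + (-(4 + 3) & 1) == 4, i.e. the
   argument is pushed out to an even doubleword of the save area.  */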
12112 /* Compute the size (in words) of a function argument. */
12114 static unsigned long
12115 rs6000_arg_size (machine_mode mode, const_tree type)
12117 unsigned long size;
12119 if (mode != BLKmode)
12120 size = GET_MODE_SIZE (mode);
12121 else
12122 size = int_size_in_bytes (type);
12124 if (TARGET_32BIT)
12125 return (size + 3) >> 2;
12126 else
12127 return (size + 7) >> 3;
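
/* Worked example (illustration only): a 20-byte BLKmode struct occupies
   (20 + 3) >> 2 = 5 words under TARGET_32BIT, or (20 + 7) >> 3 = 3
   doublewords in 64-bit mode; sizes are always rounded up to whole
   parameter words.  */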
12130 /* Use this to flush pending int fields. */
12132 static void
12133 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
12134 HOST_WIDE_INT bitpos, int final)
12136 unsigned int startbit, endbit;
12137 int intregs, intoffset;
12138 machine_mode mode;
12140 /* Handle the situations where a float is taking up the first half
12141 of the GPR, and the other half is empty (typically due to
12142 alignment restrictions). We can detect this by an 8-byte-aligned
12143 int field, or by seeing that this is the final flush for this
12144 argument. Count the word and continue on. */
12145 if (cum->floats_in_gpr == 1
12146 && (cum->intoffset % 64 == 0
12147 || (cum->intoffset == -1 && final)))
12149 cum->words++;
12150 cum->floats_in_gpr = 0;
12153 if (cum->intoffset == -1)
12154 return;
12156 intoffset = cum->intoffset;
12157 cum->intoffset = -1;
12158 cum->floats_in_gpr = 0;
12160 if (intoffset % BITS_PER_WORD != 0)
12162 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12163 MODE_INT, 0);
12164 if (mode == BLKmode)
12166 /* We couldn't find an appropriate mode, which happens,
12167 e.g., in packed structs when there are 3 bytes to load.
12168 Back intoffset back to the beginning of the word in this
12169 case. */
12170 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12174 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12175 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12176 intregs = (endbit - startbit) / BITS_PER_WORD;
12177 cum->words += intregs;
12178 /* words should be unsigned. */
12179 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
12181 int pad = (endbit/BITS_PER_WORD) - cum->words;
12182 cum->words += pad;
12186 /* The darwin64 ABI calls for us to recurse down through structs,
12187 looking for elements passed in registers. Unfortunately, we have
12188 to track int register count here also because of misalignments
12189 in powerpc alignment mode. */
12191 static void
12192 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
12193 const_tree type,
12194 HOST_WIDE_INT startbitpos)
12196 tree f;
12198 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12199 if (TREE_CODE (f) == FIELD_DECL)
12201 HOST_WIDE_INT bitpos = startbitpos;
12202 tree ftype = TREE_TYPE (f);
12203 machine_mode mode;
12204 if (ftype == error_mark_node)
12205 continue;
12206 mode = TYPE_MODE (ftype);
12208 if (DECL_SIZE (f) != 0
12209 && tree_fits_uhwi_p (bit_position (f)))
12210 bitpos += int_bit_position (f);
12212 /* ??? FIXME: else assume zero offset. */
12214 if (TREE_CODE (ftype) == RECORD_TYPE)
12215 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
12216 else if (USE_FP_FOR_ARG_P (cum, mode))
12218 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
12219 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
12220 cum->fregno += n_fpregs;
12221 /* Single-precision floats present a special problem for
12222 us, because they are smaller than an 8-byte GPR, and so
12223 the structure-packing rules combined with the standard
12224 varargs behavior mean that we want to pack float/float
12225 and float/int combinations into a single register's
12226 space. This is complicated by the arg advance flushing,
12227 which works on arbitrarily large groups of int-type
12228 fields. */
12229 if (mode == SFmode)
12231 if (cum->floats_in_gpr == 1)
12233 /* Two floats in a word; count the word and reset
12234 the float count. */
12235 cum->words++;
12236 cum->floats_in_gpr = 0;
12238 else if (bitpos % 64 == 0)
12240 /* A float at the beginning of an 8-byte word;
12241 count it and put off adjusting cum->words until
12242 we see if an arg advance flush is going to do it
12243 for us. */
12244 cum->floats_in_gpr++;
12246 else
12248 /* The float is at the end of a word, preceded
12249 by integer fields, so the arg advance flush
12250 just above has already set cum->words and
12251 everything is taken care of. */
12254 else
12255 cum->words += n_fpregs;
12257 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12259 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
12260 cum->vregno++;
12261 cum->words += 2;
12263 else if (cum->intoffset == -1)
12264 cum->intoffset = bitpos;
12268 /* Check for an item that needs to be considered specially under the darwin 64
12269 bit ABI. These are record types where the mode is BLK or the structure is
12270 8 bytes in size. */
12271 static int
12272 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
12274 return rs6000_darwin64_abi
12275 && ((mode == BLKmode
12276 && TREE_CODE (type) == RECORD_TYPE
12277 && int_size_in_bytes (type) > 0)
12278 || (type && TREE_CODE (type) == RECORD_TYPE
12279 && int_size_in_bytes (type) == 8)) ? 1 : 0;
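
/* Illustration: under the Darwin64 ABI this returns 1 both for a
   BLKmode record such as struct { char c[33]; } and for any 8-byte
   record (even one with a scalar mode, e.g. struct { int a, b; }),
   but 0 for, say, a 4-byte struct { int a; }.  */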
12282 /* Update the data in CUM to advance over an argument
12283 of mode MODE and data type TYPE.
12284 (TYPE is null for libcalls where that information may not be available.)
12286 Note that for args passed by reference, function_arg will be called
12287 with MODE and TYPE set to that of the pointer to the arg, not the arg
12288 itself. */
12290 static void
12291 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
12292 const_tree type, bool named, int depth)
12294 machine_mode elt_mode;
12295 int n_elts;
12297 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12299 /* Only tick off an argument if we're not recursing. */
12300 if (depth == 0)
12301 cum->nargs_prototype--;
12303 #ifdef HAVE_AS_GNU_ATTRIBUTE
12304 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
12305 && cum->escapes)
12307 if (SCALAR_FLOAT_MODE_P (mode))
12309 rs6000_passes_float = true;
12310 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
12311 && (FLOAT128_IBM_P (mode)
12312 || FLOAT128_IEEE_P (mode)
12313 || (type != NULL
12314 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
12315 rs6000_passes_long_double = true;
12317 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
12318 || (SPE_VECTOR_MODE (mode)
12319 && !cum->stdarg
12320 && cum->sysv_gregno <= GP_ARG_MAX_REG))
12321 rs6000_passes_vector = true;
12323 #endif
12325 if (TARGET_ALTIVEC_ABI
12326 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
12327 || (type && TREE_CODE (type) == VECTOR_TYPE
12328 && int_size_in_bytes (type) == 16)))
12330 bool stack = false;
12332 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12334 cum->vregno += n_elts;
12336 if (!TARGET_ALTIVEC)
12337 error ("cannot pass argument in vector register because"
12338 " altivec instructions are disabled, use -maltivec"
12339 " to enable them");
12341 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
12342 even if it is going to be passed in a vector register.
12343 Darwin does the same for variable-argument functions. */
12344 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12345 && TARGET_64BIT)
12346 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
12347 stack = true;
12349 else
12350 stack = true;
12352 if (stack)
12354 int align;
12356 /* Vector parameters must be 16-byte aligned. In 32-bit
12357 mode this means we need to take into account the offset
12358 to the parameter save area. In 64-bit mode, they just
12359 have to start on an even word, since the parameter save
12360 area is 16-byte aligned. */
12361 if (TARGET_32BIT)
12362 align = -(rs6000_parm_offset () + cum->words) & 3;
12363 else
12364 align = cum->words & 1;
12365 cum->words += align + rs6000_arg_size (mode, type);
12367 if (TARGET_DEBUG_ARG)
12369 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
12370 cum->words, align);
12371 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
12372 cum->nargs_prototype, cum->prototype,
12373 GET_MODE_NAME (mode));
12377 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
12378 && !cum->stdarg
12379 && cum->sysv_gregno <= GP_ARG_MAX_REG)
12380 cum->sysv_gregno++;
12382 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12384 int size = int_size_in_bytes (type);
12385 /* Variable sized types have size == -1 and are
12386 treated as if consisting entirely of ints.
12387 Pad to 16 byte boundary if needed. */
12388 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12389 && (cum->words % 2) != 0)
12390 cum->words++;
12391 /* For varargs, we can just go up by the size of the struct. */
12392 if (!named)
12393 cum->words += (size + 7) / 8;
12394 else
12396 /* It is tempting to say int register count just goes up by
12397 sizeof(type)/8, but this is wrong in a case such as
12398 { int; double; int; } [powerpc alignment]. We have to
12399 grovel through the fields for these too. */
12400 cum->intoffset = 0;
12401 cum->floats_in_gpr = 0;
12402 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
12403 rs6000_darwin64_record_arg_advance_flush (cum,
12404 size * BITS_PER_UNIT, 1);
12406 if (TARGET_DEBUG_ARG)
12408 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
12409 cum->words, TYPE_ALIGN (type), size);
12410 fprintf (stderr,
12411 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
12412 cum->nargs_prototype, cum->prototype,
12413 GET_MODE_NAME (mode));
12416 else if (DEFAULT_ABI == ABI_V4)
12418 if (abi_v4_pass_in_fpr (mode))
12420 /* _Decimal128 must use an even/odd register pair. This assumes
12421 that the register number is odd when fregno is odd. */
12422 if (mode == TDmode && (cum->fregno % 2) == 1)
12423 cum->fregno++;
12425 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12426 <= FP_ARG_V4_MAX_REG)
12427 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
12428 else
12430 cum->fregno = FP_ARG_V4_MAX_REG + 1;
12431 if (mode == DFmode || FLOAT128_IBM_P (mode)
12432 || mode == DDmode || mode == TDmode)
12433 cum->words += cum->words & 1;
12434 cum->words += rs6000_arg_size (mode, type);
12437 else
12439 int n_words = rs6000_arg_size (mode, type);
12440 int gregno = cum->sysv_gregno;
12442 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
12443 (r7,r8) or (r9,r10), as is any other 2-word item such
12444 as complex int, due to a historical mistake. */
12445 if (n_words == 2)
12446 gregno += (1 - gregno) & 1;
12448 /* Multi-reg args are not split between registers and stack. */
12449 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12451 /* Long long and SPE vectors are aligned on the stack.
12452 So are other 2 word items such as complex int due to
12453 a historical mistake. */
12454 if (n_words == 2)
12455 cum->words += cum->words & 1;
12456 cum->words += n_words;
12459 /* Note: we keep accumulating gregno even after we've started
12460 spilling to the stack; this lets expand_builtin_saveregs see
12461 that spilling has begun. */
12462 cum->sysv_gregno = gregno + n_words;
12465 if (TARGET_DEBUG_ARG)
12467 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12468 cum->words, cum->fregno);
12469 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
12470 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
12471 fprintf (stderr, "mode = %4s, named = %d\n",
12472 GET_MODE_NAME (mode), named);
12475 else
12477 int n_words = rs6000_arg_size (mode, type);
12478 int start_words = cum->words;
12479 int align_words = rs6000_parm_start (mode, type, start_words);
12481 cum->words = align_words + n_words;
12483 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
12485 /* _Decimal128 must be passed in an even/odd float register pair.
12486 This assumes that the register number is odd when fregno is
12487 odd. */
12488 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12489 cum->fregno++;
12490 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12493 if (TARGET_DEBUG_ARG)
12495 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12496 cum->words, cum->fregno);
12497 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12498 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12499 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12500 named, align_words - start_words, depth);
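
/* Worked illustration of the ABI_V4 GPR path above (hypothetical
   values): a 2-word item arriving with sysv_gregno == 4 is first bumped
   to the odd register 5, takes the pair (r5,r6), and leaves
   sysv_gregno == 7.  Arriving at sysv_gregno == 10 it would need
   (r11,r12), past GP_ARG_MAX_REG, so instead cum->words is padded to an
   even word and the item goes entirely to the stack.  */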
12505 static void
12506 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12507 const_tree type, bool named)
12509 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12513 static rtx
12514 spe_build_register_parallel (machine_mode mode, int gregno)
12516 rtx r1, r3, r5, r7;
12518 switch (mode)
12520 case DFmode:
12521 r1 = gen_rtx_REG (DImode, gregno);
12522 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12523 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
12525 case DCmode:
12526 case TFmode:
12527 r1 = gen_rtx_REG (DImode, gregno);
12528 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12529 r3 = gen_rtx_REG (DImode, gregno + 2);
12530 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12531 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
12533 case TCmode:
12534 r1 = gen_rtx_REG (DImode, gregno);
12535 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12536 r3 = gen_rtx_REG (DImode, gregno + 2);
12537 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12538 r5 = gen_rtx_REG (DImode, gregno + 4);
12539 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
12540 r7 = gen_rtx_REG (DImode, gregno + 6);
12541 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
12542 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
12544 default:
12545 gcc_unreachable ();
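
/* Sketch of the RTL built above for a DCmode value starting at r5
   (gregno == 5): two DImode halves at byte offsets 0 and 8,

     (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   TCmode extends the same pattern to four registers, while DFmode uses
   a single-element PARALLEL.  */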
12549 /* Determine where to put a SIMD argument on the SPE. */
12550 static rtx
12551 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
12552 const_tree type)
12554 int gregno = cum->sysv_gregno;
12556 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
12557 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
12558 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
12559 || mode == DCmode || mode == TCmode))
12561 int n_words = rs6000_arg_size (mode, type);
12563 /* Doubles go in an odd/even register pair (r5/r6, etc). */
12564 if (mode == DFmode)
12565 gregno += (1 - gregno) & 1;
12567 /* Multi-reg args are not split between registers and stack. */
12568 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12569 return NULL_RTX;
12571 return spe_build_register_parallel (mode, gregno);
12573 if (cum->stdarg)
12575 int n_words = rs6000_arg_size (mode, type);
12577 /* SPE vectors are put in odd registers. */
12578 if (n_words == 2 && (gregno & 1) == 0)
12579 gregno += 1;
12581 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
12583 rtx r1, r2;
12584 machine_mode m = SImode;
12586 r1 = gen_rtx_REG (m, gregno);
12587 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
12588 r2 = gen_rtx_REG (m, gregno + 1);
12589 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
12590 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
12592 else
12593 return NULL_RTX;
12595 else
12597 if (gregno <= GP_ARG_MAX_REG)
12598 return gen_rtx_REG (mode, gregno);
12599 else
12600 return NULL_RTX;
12604 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12605 structure between cum->intoffset and bitpos to integer registers. */
12607 static void
12608 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12609 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12611 machine_mode mode;
12612 unsigned int regno;
12613 unsigned int startbit, endbit;
12614 int this_regno, intregs, intoffset;
12615 rtx reg;
12617 if (cum->intoffset == -1)
12618 return;
12620 intoffset = cum->intoffset;
12621 cum->intoffset = -1;
12623 /* If this is the trailing part of a word, try to only load that
12624 much into the register. Otherwise load the whole register. Note
12625 that in the latter case we may pick up unwanted bits. It's not a
12626 problem at the moment, but we may wish to revisit this. */
12628 if (intoffset % BITS_PER_WORD != 0)
12630 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12631 MODE_INT, 0);
12632 if (mode == BLKmode)
12634 /* We couldn't find an appropriate mode, which happens,
12635 e.g., in packed structs when there are 3 bytes to load.
12636 Back intoffset back to the beginning of the word in this
12637 case. */
12638 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12639 mode = word_mode;
12642 else
12643 mode = word_mode;
12645 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12646 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12647 intregs = (endbit - startbit) / BITS_PER_WORD;
12648 this_regno = cum->words + intoffset / BITS_PER_WORD;
12650 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12651 cum->use_stack = 1;
12653 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12654 if (intregs <= 0)
12655 return;
12657 intoffset /= BITS_PER_UNIT;
12660 regno = GP_ARG_MIN_REG + this_regno;
12661 reg = gen_rtx_REG (mode, regno);
12662 rvec[(*k)++] =
12663 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12665 this_regno += 1;
12666 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12667 mode = word_mode;
12668 intregs -= 1;
12670 while (intregs > 0);
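
/* Illustration of the partial-word case above: int data starting at bit
   offset 48 within a 64-bit word leaves a 16-bit trailing piece, so the
   first register is loaded in HImode and any following words in
   word_mode.  For a 3-byte tail no 24-bit integer mode exists,
   mode_for_size returns BLKmode, and intoffset is rounded back down so
   a full word is loaded instead (possibly picking up the unwanted bits
   noted above).  */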
12673 /* Recursive workhorse for the following. */
12675 static void
12676 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12677 HOST_WIDE_INT startbitpos, rtx rvec[],
12678 int *k)
12680 tree f;
12682 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12683 if (TREE_CODE (f) == FIELD_DECL)
12685 HOST_WIDE_INT bitpos = startbitpos;
12686 tree ftype = TREE_TYPE (f);
12687 machine_mode mode;
12688 if (ftype == error_mark_node)
12689 continue;
12690 mode = TYPE_MODE (ftype);
12692 if (DECL_SIZE (f) != 0
12693 && tree_fits_uhwi_p (bit_position (f)))
12694 bitpos += int_bit_position (f);
12696 /* ??? FIXME: else assume zero offset. */
12698 if (TREE_CODE (ftype) == RECORD_TYPE)
12699 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12700 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12702 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12703 #if 0
12704 switch (mode)
12706 case SCmode: mode = SFmode; break;
12707 case DCmode: mode = DFmode; break;
12708 case TCmode: mode = TFmode; break;
12709 default: break;
12711 #endif
12712 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12713 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12715 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12716 && (mode == TFmode || mode == TDmode));
12717 /* Long double or _Decimal128 split over regs and memory. */
12718 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12719 cum->use_stack = 1;
12721 rvec[(*k)++]
12722 = gen_rtx_EXPR_LIST (VOIDmode,
12723 gen_rtx_REG (mode, cum->fregno++),
12724 GEN_INT (bitpos / BITS_PER_UNIT));
12725 if (FLOAT128_2REG_P (mode))
12726 cum->fregno++;
12728 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12730 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12731 rvec[(*k)++]
12732 = gen_rtx_EXPR_LIST (VOIDmode,
12733 gen_rtx_REG (mode, cum->vregno++),
12734 GEN_INT (bitpos / BITS_PER_UNIT));
12736 else if (cum->intoffset == -1)
12737 cum->intoffset = bitpos;
12741 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12742 the register(s) to be used for each field and subfield of a struct
12743 being passed by value, along with the offset of where the
12744 register's value may be found in the block. FP fields go in FP
12745 registers, vector fields go in vector registers, and everything
12746 else goes in int registers, packed as in memory.
12748 This code is also used for function return values. RETVAL indicates
12749 whether this is the case.
12751 Much of this is taken from the SPARC V9 port, which has a similar
12752 calling convention. */
12754 static rtx
12755 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12756 bool named, bool retval)
12758 rtx rvec[FIRST_PSEUDO_REGISTER];
12759 int k = 1, kbase = 1;
12760 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12761 /* This is a copy; modifications are not visible to our caller. */
12762 CUMULATIVE_ARGS copy_cum = *orig_cum;
12763 CUMULATIVE_ARGS *cum = &copy_cum;
12765 /* Pad to 16 byte boundary if needed. */
12766 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12767 && (cum->words % 2) != 0)
12768 cum->words++;
12770 cum->intoffset = 0;
12771 cum->use_stack = 0;
12772 cum->named = named;
12774 /* Put entries into rvec[] for individual FP and vector fields, and
12775 for the chunks of memory that go in int regs. Note we start at
12776 element 1; 0 is reserved for an indication of using memory, and
12777 may or may not be filled in below. */
12778 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12779 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12781 /* If any part of the struct went on the stack put all of it there.
12782 This hack is because the generic code for
12783 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12784 parts of the struct are not at the beginning. */
12785 if (cum->use_stack)
12787 if (retval)
12788 return NULL_RTX; /* doesn't go in registers at all */
12789 kbase = 0;
12790 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12792 if (k > 1 || cum->use_stack)
12793 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12794 else
12795 return NULL_RTX;
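/* Illustrative sketch (assumed layout, not taken from a compiler dump):
   for a first argument of type struct { double d; int i; } passed wholly
   in registers under darwin64, the PARALLEL built above looks roughly like

       (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                      (expr_list (reg:DI r4) (const_int 8))])

   i.e. the FP field in an FPR and the remaining bytes flushed into a GPR,
   each element tagged with its byte offset in the in-memory layout.  */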
12798 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12800 static rtx
12801 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12802 int align_words)
12804 int n_units;
12805 int i, k;
12806 rtx rvec[GP_ARG_NUM_REG + 1];
12808 if (align_words >= GP_ARG_NUM_REG)
12809 return NULL_RTX;
12811 n_units = rs6000_arg_size (mode, type);
12813 /* Optimize the simple case where the arg fits in one gpr, except in
12814 the case of BLKmode due to assign_parms assuming that registers are
12815 BITS_PER_WORD wide. */
12816 if (n_units == 0
12817 || (n_units == 1 && mode != BLKmode))
12818 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12820 k = 0;
12821 if (align_words + n_units > GP_ARG_NUM_REG)
12822 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12823 using a magic NULL_RTX component.
12824 This is not strictly correct. Only some of the arg belongs in
12825 memory, not all of it. However, the normal scheme using
12826 function_arg_partial_nregs can result in unusual subregs, e.g.
12827 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12828 store the whole arg to memory is often more efficient than code
12829 to store pieces, and we know that space is available in the right
12830 place for the whole arg. */
12831 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12833 i = 0;
12836 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12837 rtx off = GEN_INT (i++ * 4);
12838 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12840 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12842 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
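/* Example (informal): a DFmode argument reaching align_words == 7 on a
   -m32 -mpowerpc64 target has n_units == 2 but only one GPR left, so the
   result is roughly

       (parallel:DF [(expr_list (nil) (const_int 0))
                     (expr_list (reg:SI r10) (const_int 0))])

   where the (nil) element is the magic marker saying part of the value
   also lives in memory.  */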
12845 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12846 but must also be copied into the parameter save area starting at
12847 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12848 to the GPRs and/or memory. Return the number of elements used. */
12850 static int
12851 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12852 int align_words, rtx *rvec)
12854 int k = 0;
12856 if (align_words < GP_ARG_NUM_REG)
12858 int n_words = rs6000_arg_size (mode, type);
12860 if (align_words + n_words > GP_ARG_NUM_REG
12861 || mode == BLKmode
12862 || (TARGET_32BIT && TARGET_POWERPC64))
12864 /* If this is partially on the stack, then we only
12865 include the portion actually in registers here. */
12866 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12867 int i = 0;
12869 if (align_words + n_words > GP_ARG_NUM_REG)
12871 /* Not all of the arg fits in gprs. Say that it goes in memory
12872 too, using a magic NULL_RTX component. Also see comment in
12873 rs6000_mixed_function_arg for why the normal
12874 function_arg_partial_nregs scheme doesn't work in this case. */
12875 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12880 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12881 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12882 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12884 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12886 else
12888 /* The whole arg fits in gprs. */
12889 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12890 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12893 else
12895 /* It's entirely in memory. */
12896 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12899 return k;
12902 /* RVEC is a vector of K components of an argument of mode MODE.
12903 Construct the final function_arg return value from it. */
12905 static rtx
12906 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12908 gcc_assert (k >= 1);
12910 /* Avoid returning a PARALLEL in the trivial cases. */
12911 if (k == 1)
12913 if (XEXP (rvec[0], 0) == NULL_RTX)
12914 return NULL_RTX;
12916 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12917 return XEXP (rvec[0], 0);
12920 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12923 /* Determine where to put an argument to a function.
12924 Value is zero to push the argument on the stack,
12925 or a hard register in which to store the argument.
12927 MODE is the argument's machine mode.
12928 TYPE is the data type of the argument (as a tree).
12929 This is null for libcalls where that information may
12930 not be available.
12931 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12932 the preceding args and about the function being called. It is
12933 not modified in this routine.
12934 NAMED is nonzero if this argument is a named parameter
12935 (otherwise it is an extra parameter matching an ellipsis).
12937 On RS/6000 the first eight words of non-FP are normally in registers
12938 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12939 Under V.4, the first 8 FP args are in registers.
12941 If this is floating-point and no prototype is specified, we use
12942 both an FP and integer register (or possibly FP reg and stack). Library
12943 functions (when CALL_LIBCALL is set) always have the proper types for args,
12944 so we can pass the FP value just in one register. emit_library_function
12945 doesn't support PARALLEL anyway.
12947 Note that for args passed by reference, function_arg will be called
12948 with MODE and TYPE set to that of the pointer to the arg, not the arg
12949 itself. */
12951 static rtx
12952 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12953 const_tree type, bool named)
12955 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12956 enum rs6000_abi abi = DEFAULT_ABI;
12957 machine_mode elt_mode;
12958 int n_elts;
12960 /* Return a marker to indicate whether CR1 needs to set or clear the
12961 bit that V.4 uses to say fp args were passed in registers.
12962 Assume that we don't need the marker for software floating point,
12963 or compiler generated library calls. */
12964 if (mode == VOIDmode)
12966 if (abi == ABI_V4
12967 && (cum->call_cookie & CALL_LIBCALL) == 0
12968 && (cum->stdarg
12969 || (cum->nargs_prototype < 0
12970 && (cum->prototype || TARGET_NO_PROTOTYPE))))
12972 /* For the SPE, we need to crxor CR6 always. */
12973 if (TARGET_SPE_ABI)
12974 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
12975 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
12976 return GEN_INT (cum->call_cookie
12977 | ((cum->fregno == FP_ARG_MIN_REG)
12978 ? CALL_V4_SET_FP_ARGS
12979 : CALL_V4_CLEAR_FP_ARGS));
12982 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12985 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12987 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12989 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12990 if (rslt != NULL_RTX)
12991 return rslt;
12992 /* Else fall through to usual handling. */
12995 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12997 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12998 rtx r, off;
12999 int i, k = 0;
13001 /* Do we also need to pass this argument in the parameter save area?
13002 Library support functions for IEEE 128-bit are assumed to not need the
13003 value passed both in GPRs and in vector registers. */
13004 if (TARGET_64BIT && !cum->prototype
13005 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
13007 int align_words = ROUND_UP (cum->words, 2);
13008 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
13011 /* Describe where this argument goes in the vector registers. */
13012 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
13014 r = gen_rtx_REG (elt_mode, cum->vregno + i);
13015 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
13016 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13019 return rs6000_finish_function_arg (mode, rvec, k);
13021 else if (TARGET_ALTIVEC_ABI
13022 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
13023 || (type && TREE_CODE (type) == VECTOR_TYPE
13024 && int_size_in_bytes (type) == 16)))
13026 if (named || abi == ABI_V4)
13027 return NULL_RTX;
13028 else
13030 /* Vector parameters to varargs functions under AIX or Darwin
13031 get passed in memory and possibly also in GPRs. */
13032 int align, align_words, n_words;
13033 machine_mode part_mode;
13035 /* Vector parameters must be 16-byte aligned. In 32-bit
13036 mode this means we need to take into account the offset
13037 to the parameter save area. In 64-bit mode, they just
13038 have to start on an even word, since the parameter save
13039 area is 16-byte aligned. */
13040 if (TARGET_32BIT)
13041 align = -(rs6000_parm_offset () + cum->words) & 3;
13042 else
13043 align = cum->words & 1;
13044 align_words = cum->words + align;
13046 /* Out of registers? Memory, then. */
13047 if (align_words >= GP_ARG_NUM_REG)
13048 return NULL_RTX;
13050 if (TARGET_32BIT && TARGET_POWERPC64)
13051 return rs6000_mixed_function_arg (mode, type, align_words);
13053 /* The vector value goes in GPRs. Only the part of the
13054 value in GPRs is reported here. */
13055 part_mode = mode;
13056 n_words = rs6000_arg_size (mode, type);
13057 if (align_words + n_words > GP_ARG_NUM_REG)
13058 /* Fortunately, there are only two possibilities: the value
13059 is either wholly in GPRs or half in GPRs and half not. */
13060 part_mode = DImode;
13062 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
13065 else if (TARGET_SPE_ABI && TARGET_SPE
13066 && (SPE_VECTOR_MODE (mode)
13067 || (TARGET_E500_DOUBLE && (mode == DFmode
13068 || mode == DCmode
13069 || mode == TFmode
13070 || mode == TCmode))))
13071 return rs6000_spe_function_arg (cum, mode, type);
13073 else if (abi == ABI_V4)
13075 if (abi_v4_pass_in_fpr (mode))
13077 /* _Decimal128 must use an even/odd register pair. This assumes
13078 that the register number is odd when fregno is odd. */
13079 if (mode == TDmode && (cum->fregno % 2) == 1)
13080 cum->fregno++;
13082 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
13083 <= FP_ARG_V4_MAX_REG)
13084 return gen_rtx_REG (mode, cum->fregno);
13085 else
13086 return NULL_RTX;
13088 else
13090 int n_words = rs6000_arg_size (mode, type);
13091 int gregno = cum->sysv_gregno;
13093 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
13094 (r7,r8) or (r9,r10), as is any other 2-word item such
13095 as complex int, due to a historical mistake. */
13096 if (n_words == 2)
13097 gregno += (1 - gregno) & 1;
13099 /* Multi-reg args are not split between registers and stack. */
13100 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
13101 return NULL_RTX;
13103 if (TARGET_32BIT && TARGET_POWERPC64)
13104 return rs6000_mixed_function_arg (mode, type,
13105 gregno - GP_ARG_MIN_REG);
13106 return gen_rtx_REG (mode, gregno);
13109 else
13111 int align_words = rs6000_parm_start (mode, type, cum->words);
13113 /* _Decimal128 must be passed in an even/odd float register pair.
13114 This assumes that the register number is odd when fregno is odd. */
13115 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
13116 cum->fregno++;
13118 if (USE_FP_FOR_ARG_P (cum, elt_mode))
13120 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
13121 rtx r, off;
13122 int i, k = 0;
13123 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
13124 int fpr_words;
13126 /* Do we also need to pass this argument in the parameter
13127 save area? */
13128 if (type && (cum->nargs_prototype <= 0
13129 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
13130 && TARGET_XL_COMPAT
13131 && align_words >= GP_ARG_NUM_REG)))
13132 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
13134 /* Describe where this argument goes in the fprs. */
13135 for (i = 0; i < n_elts
13136 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
13138 /* Check if the argument is split over registers and memory.
13139 This can only ever happen for long double or _Decimal128;
13140 complex types are handled via split_complex_arg. */
13141 machine_mode fmode = elt_mode;
13142 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
13144 gcc_assert (FLOAT128_2REG_P (fmode));
13145 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
13148 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
13149 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
13150 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13153 /* If there were not enough FPRs to hold the argument, the rest
13154 usually goes into memory. However, if the current position
13155 is still within the register parameter area, a portion may
13156 actually have to go into GPRs.
13158 Note that it may happen that the portion of the argument
13159 passed in the first "half" of the first GPR was already
13160 passed in the last FPR as well.
13162 For unnamed arguments, we already set up GPRs to cover the
13163 whole argument in rs6000_psave_function_arg, so there is
13164 nothing further to do at this point. */
13165 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
13166 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
13167 && cum->nargs_prototype > 0)
13169 static bool warned;
13171 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
13172 int n_words = rs6000_arg_size (mode, type);
13174 align_words += fpr_words;
13175 n_words -= fpr_words;
13179 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
13180 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
13181 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13183 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
13185 if (!warned && warn_psabi)
13187 warned = true;
13188 inform (input_location,
13189 "the ABI of passing homogeneous float aggregates"
13190 " has changed in GCC 5");
13194 return rs6000_finish_function_arg (mode, rvec, k);
13196 else if (align_words < GP_ARG_NUM_REG)
13198 if (TARGET_32BIT && TARGET_POWERPC64)
13199 return rs6000_mixed_function_arg (mode, type, align_words);
13201 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
13203 else
13204 return NULL_RTX;
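/* Informal examples of the dispatch above, assuming a prototype is in
   scope: under the 64-bit ELFv2 ABI, for f (double d, int i) the double
   comes back as (reg:DF f1) and the int (promoted to DImode) as
   (reg:DI r4), the GPR slot shadowing the double having merely been
   advanced past.  Under the 32-bit SVR4 ABI the double goes in f1 and
   the int in r3, since FP arguments do not consume GPRs there.  */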
13208 /* For an arg passed partly in registers and partly in memory, this is
13209 the number of bytes passed in registers. For args passed entirely in
13210 registers or entirely in memory, zero. When an arg is described by a
13211 PARALLEL, perhaps using more than one register type, this function
13212 returns the number of bytes used by the first element of the PARALLEL. */
13214 static int
13215 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
13216 tree type, bool named)
13218 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
13219 bool passed_in_gprs = true;
13220 int ret = 0;
13221 int align_words;
13222 machine_mode elt_mode;
13223 int n_elts;
13225 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
13227 if (DEFAULT_ABI == ABI_V4)
13228 return 0;
13230 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
13232 /* If we are passing this arg in the fixed parameter save area (gprs or
13233 memory) as well as VRs, we do not use the partial bytes mechanism;
13234 instead, rs6000_function_arg will return a PARALLEL including a memory
13235 element as necessary. Library support functions for IEEE 128-bit are
13236 assumed to not need the value passed both in GPRs and in vector
13237 registers. */
13238 if (TARGET_64BIT && !cum->prototype
13239 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
13240 return 0;
13242 /* Otherwise, we pass in VRs only. Check for partial copies. */
13243 passed_in_gprs = false;
13244 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
13245 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
13248 /* In this complicated case we just disable the partial_nregs code. */
13249 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
13250 return 0;
13252 align_words = rs6000_parm_start (mode, type, cum->words);
13254 if (USE_FP_FOR_ARG_P (cum, elt_mode))
13256 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
13258 /* If we are passing this arg in the fixed parameter save area
13259 (gprs or memory) as well as FPRs, we do not use the partial
13260 bytes mechanism; instead, rs6000_function_arg will return a
13261 PARALLEL including a memory element as necessary. */
13262 if (type
13263 && (cum->nargs_prototype <= 0
13264 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
13265 && TARGET_XL_COMPAT
13266 && align_words >= GP_ARG_NUM_REG)))
13267 return 0;
13269 /* Otherwise, we pass in FPRs only. Check for partial copies. */
13270 passed_in_gprs = false;
13271 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
13273 /* Compute number of bytes / words passed in FPRs. If there
13274 is still space available in the register parameter area
13275 *after* that amount, a part of the argument will be passed
13276 in GPRs. In that case, the total amount passed in any
13277 registers is equal to the amount that would have been passed
13278 in GPRs if everything were passed there, so we fall back to
13279 the GPR code below to compute the appropriate value. */
13280 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
13281 * MIN (8, GET_MODE_SIZE (elt_mode)));
13282 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
13284 if (align_words + fpr_words < GP_ARG_NUM_REG)
13285 passed_in_gprs = true;
13286 else
13287 ret = fpr;
13291 if (passed_in_gprs
13292 && align_words < GP_ARG_NUM_REG
13293 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
13294 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
13296 if (ret != 0 && TARGET_DEBUG_ARG)
13297 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
13299 return ret;
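/* Worked example (informal): a 16-byte BLKmode aggregate whose first
   word would land at align_words == 7 on a 64-bit AIX/ELFv2 target
   needs two words but has only one GPR (r10) left, so the code above
   returns (GP_ARG_NUM_REG - 7) * 8 == 8 partial bytes; the remaining
   8 bytes live in the parameter save area.  */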
13302 /* A C expression that indicates when an argument must be passed by
13303 reference. If nonzero for an argument, a copy of that argument is
13304 made in memory and a pointer to the argument is passed instead of
13305 the argument itself. The pointer is passed in whatever way is
13306 appropriate for passing a pointer to that type.
13308 Under V.4, aggregates and long double are passed by reference.
13310 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
13311 reference unless the AltiVec vector extension ABI is in force.
13313 As an extension to all ABIs, variable sized types are passed by
13314 reference. */
13316 static bool
13317 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
13318 machine_mode mode, const_tree type,
13319 bool named ATTRIBUTE_UNUSED)
13321 if (!type)
13322 return 0;
13324 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
13325 && FLOAT128_IEEE_P (TYPE_MODE (type)))
13327 if (TARGET_DEBUG_ARG)
13328 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
13329 return 1;
13332 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
13334 if (TARGET_DEBUG_ARG)
13335 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
13336 return 1;
13339 if (int_size_in_bytes (type) < 0)
13341 if (TARGET_DEBUG_ARG)
13342 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
13343 return 1;
13346 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
13347 modes only exist for GCC vector types if -maltivec. */
13348 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13350 if (TARGET_DEBUG_ARG)
13351 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
13352 return 1;
13355 /* Pass synthetic vectors in memory. */
13356 if (TREE_CODE (type) == VECTOR_TYPE
13357 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
13359 static bool warned_for_pass_big_vectors = false;
13360 if (TARGET_DEBUG_ARG)
13361 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
13362 if (!warned_for_pass_big_vectors)
13364 warning (OPT_Wpsabi, "GCC vector passed by reference: "
13365 "non-standard ABI extension with no compatibility guarantee");
13366 warned_for_pass_big_vectors = true;
13368 return 1;
13371 return 0;
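/* Example (informal): under the 32-bit SVR4 ABI a call such as f (s),
   with s of type struct { int a[4]; }, really passes a pointer to a
   caller-made copy, via the AGGREGATE_TYPE_P test above; any variably
   sized type takes the int_size_in_bytes < 0 path on every ABI.  */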
13374 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
13375 already processed. Return true if the parameter must be passed
13376 (fully or partially) on the stack. */
13378 static bool
13379 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
13381 machine_mode mode;
13382 int unsignedp;
13383 rtx entry_parm;
13385 /* Catch errors. */
13386 if (type == NULL || type == error_mark_node)
13387 return true;
13389 /* Handle types with no storage requirement. */
13390 if (TYPE_MODE (type) == VOIDmode)
13391 return false;
13393 /* Handle complex types. */
13394 if (TREE_CODE (type) == COMPLEX_TYPE)
13395 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
13396 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
13398 /* Handle transparent aggregates. */
13399 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
13400 && TYPE_TRANSPARENT_AGGR (type))
13401 type = TREE_TYPE (first_field (type));
13403 /* See if this arg was passed by invisible reference. */
13404 if (pass_by_reference (get_cumulative_args (args_so_far),
13405 TYPE_MODE (type), type, true))
13406 type = build_pointer_type (type);
13408 /* Find mode as it is passed by the ABI. */
13409 unsignedp = TYPE_UNSIGNED (type);
13410 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
13412 /* If we must pass in stack, we need a stack. */
13413 if (rs6000_must_pass_in_stack (mode, type))
13414 return true;
13416 /* If there is no incoming register, we need a stack. */
13417 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
13418 if (entry_parm == NULL)
13419 return true;
13421 /* Likewise if we need to pass both in registers and on the stack. */
13422 if (GET_CODE (entry_parm) == PARALLEL
13423 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
13424 return true;
13426 /* Also true if we're partially in registers and partially not. */
13427 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
13428 return true;
13430 /* Update info on where next arg arrives in registers. */
13431 rs6000_function_arg_advance (args_so_far, mode, type, true);
13432 return false;
13435 /* Return true if FUN has no prototype, has a variable argument
13436 list, or passes any parameter in memory. */
13438 static bool
13439 rs6000_function_parms_need_stack (tree fun, bool incoming)
13441 tree fntype, result;
13442 CUMULATIVE_ARGS args_so_far_v;
13443 cumulative_args_t args_so_far;
13445 if (!fun)
13446 /* Must be a libcall, all of which only use reg parms. */
13447 return false;
13449 fntype = fun;
13450 if (!TYPE_P (fun))
13451 fntype = TREE_TYPE (fun);
13453 /* Varargs functions need the parameter save area. */
13454 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
13455 return true;
13457 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
13458 args_so_far = pack_cumulative_args (&args_so_far_v);
13460 /* When incoming, we will have been passed the function decl.
13461 It is necessary to use the decl to handle K&R style functions,
13462 where TYPE_ARG_TYPES may not be available. */
13463 if (incoming)
13465 gcc_assert (DECL_P (fun));
13466 result = DECL_RESULT (fun);
13468 else
13469 result = TREE_TYPE (fntype);
13471 if (result && aggregate_value_p (result, fntype))
13473 if (!TYPE_P (result))
13474 result = TREE_TYPE (result);
13475 result = build_pointer_type (result);
13476 rs6000_parm_needs_stack (args_so_far, result);
13479 if (incoming)
13481 tree parm;
13483 for (parm = DECL_ARGUMENTS (fun);
13484 parm && parm != void_list_node;
13485 parm = TREE_CHAIN (parm))
13486 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
13487 return true;
13489 else
13491 function_args_iterator args_iter;
13492 tree arg_type;
13494 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
13495 if (rs6000_parm_needs_stack (args_so_far, arg_type))
13496 return true;
13499 return false;
13502 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
13503 usually a constant depending on the ABI. However, in the ELFv2 ABI
13504 the register parameter area is optional when calling a function that
13505 has a prototype in scope, has no variable argument list, and passes
13506 all parameters in registers. */
13509 rs6000_reg_parm_stack_space (tree fun, bool incoming)
13511 int reg_parm_stack_space;
13513 switch (DEFAULT_ABI)
13515 default:
13516 reg_parm_stack_space = 0;
13517 break;
13519 case ABI_AIX:
13520 case ABI_DARWIN:
13521 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13522 break;
13524 case ABI_ELFv2:
13525 /* ??? Recomputing this every time is a bit expensive. Is there
13526 a place to cache this information? */
13527 if (rs6000_function_parms_need_stack (fun, incoming))
13528 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13529 else
13530 reg_parm_stack_space = 0;
13531 break;
13534 return reg_parm_stack_space;
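/* The 64/32 byte figures above are the eight GPR parameter slots
   r3..r10 at 8 or 4 bytes each.  Informal ELFv2 example: a prototyped
   int f (int, int) passes everything in registers, so
   rs6000_function_parms_need_stack returns false and the caller
   allocates no parameter save area for the call.  */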
13537 static void
13538 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
13540 int i;
13541 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
13543 if (nregs == 0)
13544 return;
13546 for (i = 0; i < nregs; i++)
13548 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
13549 if (reload_completed)
13551 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
13552 tem = NULL_RTX;
13553 else
13554 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
13555 i * GET_MODE_SIZE (reg_mode));
13557 else
13558 tem = replace_equiv_address (tem, XEXP (tem, 0));
13560 gcc_assert (tem);
13562 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
13566 /* Perform any actions needed for a function that is receiving a
13567 variable number of arguments.
13569 CUM is as above.
13571 MODE and TYPE are the mode and type of the current parameter.
13573 PRETEND_SIZE is a variable that should be set to the amount of stack
13574 that must be pushed by the prolog to pretend that our caller pushed it.
13577 Normally, this macro will push all remaining incoming registers on the
13578 stack and set PRETEND_SIZE to the length of the registers pushed. */
13580 static void
13581 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
13582 tree type, int *pretend_size ATTRIBUTE_UNUSED,
13583 int no_rtl)
13585 CUMULATIVE_ARGS next_cum;
13586 int reg_size = TARGET_32BIT ? 4 : 8;
13587 rtx save_area = NULL_RTX, mem;
13588 int first_reg_offset;
13589 alias_set_type set;
13591 /* Skip the last named argument. */
13592 next_cum = *get_cumulative_args (cum);
13593 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13595 if (DEFAULT_ABI == ABI_V4)
13597 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13599 if (! no_rtl)
13601 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13602 HOST_WIDE_INT offset = 0;
13604 /* Try to optimize the size of the varargs save area.
13605 The ABI requires that ap.reg_save_area is doubleword
13606 aligned, but we don't need to allocate space for all
13607 the bytes, only those to which we actually will save
13608 anything. */
13609 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13610 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13611 if (TARGET_HARD_FLOAT && TARGET_FPRS
13612 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13613 && cfun->va_list_fpr_size)
13615 if (gpr_reg_num)
13616 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13617 * UNITS_PER_FP_WORD;
13618 if (cfun->va_list_fpr_size
13619 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13620 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13621 else
13622 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13623 * UNITS_PER_FP_WORD;
13625 if (gpr_reg_num)
13627 offset = -((first_reg_offset * reg_size) & ~7);
13628 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13630 gpr_reg_num = cfun->va_list_gpr_size;
13631 if (reg_size == 4 && (first_reg_offset & 1))
13632 gpr_reg_num++;
13634 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13636 else if (fpr_size)
13637 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13638 * UNITS_PER_FP_WORD
13639 - (int) (GP_ARG_NUM_REG * reg_size);
13641 if (gpr_size + fpr_size)
13643 rtx reg_save_area
13644 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13645 gcc_assert (GET_CODE (reg_save_area) == MEM);
13646 reg_save_area = XEXP (reg_save_area, 0);
13647 if (GET_CODE (reg_save_area) == PLUS)
13649 gcc_assert (XEXP (reg_save_area, 0)
13650 == virtual_stack_vars_rtx);
13651 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13652 offset += INTVAL (XEXP (reg_save_area, 1));
13654 else
13655 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13658 cfun->machine->varargs_save_offset = offset;
13659 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13662 else
13664 first_reg_offset = next_cum.words;
13665 save_area = crtl->args.internal_arg_pointer;
13667 if (targetm.calls.must_pass_in_stack (mode, type))
13668 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13671 set = get_varargs_alias_set ();
13672 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13673 && cfun->va_list_gpr_size)
13675 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13677 if (va_list_gpr_counter_field)
13678 /* V4 va_list_gpr_size counts number of registers needed. */
13679 n_gpr = cfun->va_list_gpr_size;
13680 else
13681 /* char * va_list instead counts number of bytes needed. */
13682 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13684 if (nregs > n_gpr)
13685 nregs = n_gpr;
13687 mem = gen_rtx_MEM (BLKmode,
13688 plus_constant (Pmode, save_area,
13689 first_reg_offset * reg_size));
13690 MEM_NOTRAP_P (mem) = 1;
13691 set_mem_alias_set (mem, set);
13692 set_mem_align (mem, BITS_PER_WORD);
13694 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13695 nregs);
13698 /* Save FP registers if needed. */
13699 if (DEFAULT_ABI == ABI_V4
13700 && TARGET_HARD_FLOAT && TARGET_FPRS
13701 && ! no_rtl
13702 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13703 && cfun->va_list_fpr_size)
13705 int fregno = next_cum.fregno, nregs;
13706 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13707 rtx lab = gen_label_rtx ();
13708 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13709 * UNITS_PER_FP_WORD);
13711 emit_jump_insn
13712 (gen_rtx_SET (pc_rtx,
13713 gen_rtx_IF_THEN_ELSE (VOIDmode,
13714 gen_rtx_NE (VOIDmode, cr1,
13715 const0_rtx),
13716 gen_rtx_LABEL_REF (VOIDmode, lab),
13717 pc_rtx)));
13719 for (nregs = 0;
13720 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13721 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13723 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13724 ? DFmode : SFmode,
13725 plus_constant (Pmode, save_area, off));
13726 MEM_NOTRAP_P (mem) = 1;
13727 set_mem_alias_set (mem, set);
13728 set_mem_align (mem, GET_MODE_ALIGNMENT (
13729 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13730 ? DFmode : SFmode));
13731 emit_move_insn (mem, gen_rtx_REG (
13732 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13733 ? DFmode : SFmode, fregno));
13736 emit_label (lab);
13740 /* Create the va_list data type. */
13742 static tree
13743 rs6000_build_builtin_va_list (void)
13745 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13747 /* For AIX, prefer 'char *' because that's what the system
13748 header files like. */
13749 if (DEFAULT_ABI != ABI_V4)
13750 return build_pointer_type (char_type_node);
13752 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13753 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13754 get_identifier ("__va_list_tag"), record);
13756 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13757 unsigned_char_type_node);
13758 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13759 unsigned_char_type_node);
13760 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13761 every user file. */
13762 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13763 get_identifier ("reserved"), short_unsigned_type_node);
13764 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13765 get_identifier ("overflow_arg_area"),
13766 ptr_type_node);
13767 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13768 get_identifier ("reg_save_area"),
13769 ptr_type_node);
13771 va_list_gpr_counter_field = f_gpr;
13772 va_list_fpr_counter_field = f_fpr;
13774 DECL_FIELD_CONTEXT (f_gpr) = record;
13775 DECL_FIELD_CONTEXT (f_fpr) = record;
13776 DECL_FIELD_CONTEXT (f_res) = record;
13777 DECL_FIELD_CONTEXT (f_ovf) = record;
13778 DECL_FIELD_CONTEXT (f_sav) = record;
13780 TYPE_STUB_DECL (record) = type_decl;
13781 TYPE_NAME (record) = type_decl;
13782 TYPE_FIELDS (record) = f_gpr;
13783 DECL_CHAIN (f_gpr) = f_fpr;
13784 DECL_CHAIN (f_fpr) = f_res;
13785 DECL_CHAIN (f_res) = f_ovf;
13786 DECL_CHAIN (f_ovf) = f_sav;
13788 layout_type (record);
13790 /* The correct type is an array type of one element. */
13791 return build_array_type (record, build_index_type (size_zero_node));
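/* For reference, a C-level sketch of the record built above (an
   illustration, not a declaration from any system header):

       typedef struct __va_list_tag {
         unsigned char gpr;          // next GP arg register, 0..8
         unsigned char fpr;          // next FP arg register, 0..8
         unsigned short reserved;    // padding, named to appease -Wpadded
         void *overflow_arg_area;    // next stack slot for overflow args
         void *reg_save_area;        // where the prologue saved r3..r10/f1..f8
       } __va_list_tag;

   with va_list itself being a one-element array of this record.  */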
13794 /* Implement va_start. */
13796 static void
13797 rs6000_va_start (tree valist, rtx nextarg)
13799 HOST_WIDE_INT words, n_gpr, n_fpr;
13800 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13801 tree gpr, fpr, ovf, sav, t;
13803 /* Only SVR4 needs something special. */
13804 if (DEFAULT_ABI != ABI_V4)
13806 std_expand_builtin_va_start (valist, nextarg);
13807 return;
13810 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13811 f_fpr = DECL_CHAIN (f_gpr);
13812 f_res = DECL_CHAIN (f_fpr);
13813 f_ovf = DECL_CHAIN (f_res);
13814 f_sav = DECL_CHAIN (f_ovf);
13816 valist = build_simple_mem_ref (valist);
13817 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13818 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13819 f_fpr, NULL_TREE);
13820 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13821 f_ovf, NULL_TREE);
13822 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13823 f_sav, NULL_TREE);
13825 /* Count number of gp and fp argument registers used. */
13826 words = crtl->args.info.words;
13827 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13828 GP_ARG_NUM_REG);
13829 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13830 FP_ARG_NUM_REG);
13832 if (TARGET_DEBUG_ARG)
13833 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13834 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13835 words, n_gpr, n_fpr);
13837 if (cfun->va_list_gpr_size)
13839 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13840 build_int_cst (NULL_TREE, n_gpr));
13841 TREE_SIDE_EFFECTS (t) = 1;
13842 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13845 if (cfun->va_list_fpr_size)
13847 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13848 build_int_cst (NULL_TREE, n_fpr));
13849 TREE_SIDE_EFFECTS (t) = 1;
13850 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13852 #ifdef HAVE_AS_GNU_ATTRIBUTE
13853 if (call_ABI_of_interest (cfun->decl))
13854 rs6000_passes_float = true;
13855 #endif
13858 /* Find the overflow area. */
13859 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13860 if (words != 0)
13861 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13862 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13863 TREE_SIDE_EFFECTS (t) = 1;
13864 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13866 /* If there were no va_arg invocations, don't set up the register
13867 save area. */
13868 if (!cfun->va_list_gpr_size
13869 && !cfun->va_list_fpr_size
13870 && n_gpr < GP_ARG_NUM_REG
13871 && n_fpr < FP_ARG_V4_MAX_REG)
13872 return;
13874 /* Find the register save area. */
13875 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13876 if (cfun->machine->varargs_save_offset)
13877 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13878 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13879 TREE_SIDE_EFFECTS (t) = 1;
13880 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13883 /* Implement va_arg. */
13885 static tree
13886 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13887 gimple_seq *post_p)
13889 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13890 tree gpr, fpr, ovf, sav, reg, t, u;
13891 int size, rsize, n_reg, sav_ofs, sav_scale;
13892 tree lab_false, lab_over, addr;
13893 int align;
13894 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13895 int regalign = 0;
13896 gimple *stmt;
13898 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13900 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13901 return build_va_arg_indirect_ref (t);
13904 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13905 earlier version of gcc, with the property that it always applied alignment
13906 adjustments to the va-args (even for zero-sized types). The cheapest way
13907 to deal with this is to replicate the effect of the part of
13908 std_gimplify_va_arg_expr that carries out the align adjust, for the
13909 relevant case.
13910 We don't need to check for pass-by-reference because of the test above.
13911 We can return a simplified answer, since we know there's no offset to add. */
13913 if (((TARGET_MACHO
13914 && rs6000_darwin64_abi)
13915 || DEFAULT_ABI == ABI_ELFv2
13916 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13917 && integer_zerop (TYPE_SIZE (type)))
13919 unsigned HOST_WIDE_INT align, boundary;
13920 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13921 align = PARM_BOUNDARY / BITS_PER_UNIT;
13922 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13923 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13924 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13925 boundary /= BITS_PER_UNIT;
13926 if (boundary > align)
13928 tree t;
13929 /* This updates arg ptr by the amount that would be necessary
13930 to align the zero-sized (but not zero-alignment) item. */
13931 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13932 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13933 gimplify_and_add (t, pre_p);
13935 t = fold_convert (sizetype, valist_tmp);
13936 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13937 fold_convert (TREE_TYPE (valist),
13938 fold_build2 (BIT_AND_EXPR, sizetype, t,
13939 size_int (-boundary))));
13940 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13941 gimplify_and_add (t, pre_p);
13943 /* Since it is zero-sized there's no increment for the item itself. */
13944 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13945 return build_va_arg_indirect_ref (valist_tmp);
13948 if (DEFAULT_ABI != ABI_V4)
13950 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13952 tree elem_type = TREE_TYPE (type);
13953 machine_mode elem_mode = TYPE_MODE (elem_type);
13954 int elem_size = GET_MODE_SIZE (elem_mode);
13956 if (elem_size < UNITS_PER_WORD)
13958 tree real_part, imag_part;
13959 gimple_seq post = NULL;
13961 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13962 &post);
13963 /* Copy the value into a temporary, lest the formal temporary
13964 be reused out from under us. */
13965 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13966 gimple_seq_add_seq (pre_p, post);
13968 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13969 post_p);
13971 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13975 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13978 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13979 f_fpr = DECL_CHAIN (f_gpr);
13980 f_res = DECL_CHAIN (f_fpr);
13981 f_ovf = DECL_CHAIN (f_res);
13982 f_sav = DECL_CHAIN (f_ovf);
13984 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13985 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13986 f_fpr, NULL_TREE);
13987 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13988 f_ovf, NULL_TREE);
13989 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13990 f_sav, NULL_TREE);
13992 size = int_size_in_bytes (type);
13993 rsize = (size + 3) / 4;
13994 int pad = 4 * rsize - size;
13995 align = 1;
13997 machine_mode mode = TYPE_MODE (type);
13998 if (abi_v4_pass_in_fpr (mode))
14000 /* FP args go in FP registers, if present. */
14001 reg = fpr;
14002 n_reg = (size + 7) / 8;
14003 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
14004 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
14005 if (mode != SFmode && mode != SDmode)
14006 align = 8;
14008 else
14010 /* Otherwise into GP registers. */
14011 reg = gpr;
14012 n_reg = rsize;
14013 sav_ofs = 0;
14014 sav_scale = 4;
14015 if (n_reg == 2)
14016 align = 8;
14019 /* Pull the value out of the saved registers.... */
14021 lab_over = NULL;
14022 addr = create_tmp_var (ptr_type_node, "addr");
14024 /* AltiVec vectors never go in registers when -mabi=altivec. */
14025 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
14026 align = 16;
14027 else
14029 lab_false = create_artificial_label (input_location);
14030 lab_over = create_artificial_label (input_location);
14032 /* Long long and SPE vectors are aligned in the registers.
14033 So is any other 2-gpr item such as complex int, due to a
14034 historical mistake. */
14035 u = reg;
14036 if (n_reg == 2 && reg == gpr)
14038 regalign = 1;
14039 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14040 build_int_cst (TREE_TYPE (reg), n_reg - 1));
14041 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
14042 unshare_expr (reg), u);
14044 /* _Decimal128 is passed in even/odd fpr pairs; the stored
14045 reg number is 0 for f1, so we want to make it odd. */
14046 else if (reg == fpr && mode == TDmode)
14048 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14049 build_int_cst (TREE_TYPE (reg), 1));
14050 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
14053 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
14054 t = build2 (GE_EXPR, boolean_type_node, u, t);
14055 u = build1 (GOTO_EXPR, void_type_node, lab_false);
14056 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
14057 gimplify_and_add (t, pre_p);
14059 t = sav;
14060 if (sav_ofs)
14061 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
14063 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14064 build_int_cst (TREE_TYPE (reg), n_reg));
14065 u = fold_convert (sizetype, u);
14066 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
14067 t = fold_build_pointer_plus (t, u);
14069 /* _Decimal32 varargs are located in the second word of the 64-bit
14070 FP register for 32-bit binaries. */
14071 if (TARGET_32BIT
14072 && TARGET_HARD_FLOAT && TARGET_FPRS
14073 && mode == SDmode)
14074 t = fold_build_pointer_plus_hwi (t, size);
14076 /* Args are passed right-aligned. */
14077 if (BYTES_BIG_ENDIAN)
14078 t = fold_build_pointer_plus_hwi (t, pad);
14080 gimplify_assign (addr, t, pre_p);
14082 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
14084 stmt = gimple_build_label (lab_false);
14085 gimple_seq_add_stmt (pre_p, stmt);
14087 if ((n_reg == 2 && !regalign) || n_reg > 2)
14089 /* Ensure that we don't find any more args in regs.
14090 Alignment has been taken care of for the special cases. */
14091 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
14095 /* ... otherwise out of the overflow area. */
14097 /* Care for on-stack alignment if needed. */
14098 t = ovf;
14099 if (align != 1)
14101 t = fold_build_pointer_plus_hwi (t, align - 1);
14102 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
14103 build_int_cst (TREE_TYPE (t), -align));
14106 /* Args are passed right-aligned. */
14107 if (BYTES_BIG_ENDIAN)
14108 t = fold_build_pointer_plus_hwi (t, pad);
14110 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
14112 gimplify_assign (unshare_expr (addr), t, pre_p);
14114 t = fold_build_pointer_plus_hwi (t, size);
14115 gimplify_assign (unshare_expr (ovf), t, pre_p);
14117 if (lab_over)
14119 stmt = gimple_build_label (lab_over);
14120 gimple_seq_add_stmt (pre_p, stmt);
14123 if (STRICT_ALIGNMENT
14124 && (TYPE_ALIGN (type)
14125 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
14127 /* The value (of type complex double, for example) may not be
14128 aligned in memory in the saved registers, so copy via a
14129 temporary. (This is the same code as used for SPARC.) */
14130 tree tmp = create_tmp_var (type, "va_arg_tmp");
14131 tree dest_addr = build_fold_addr_expr (tmp);
14133 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
14134 3, dest_addr, addr, size_int (rsize * 4));
14136 gimplify_and_add (copy, pre_p);
14137 addr = dest_addr;
14140 addr = fold_convert (ptrtype, addr);
14141 return build_va_arg_indirect_ref (addr);
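/* Worked example (informal): va_arg (ap, double) under 32-bit SVR4 hard
   float takes the FPR branch above with n_reg = 1, sav_ofs = 32 (the
   8 * 4 bytes of saved GPRs that precede the FPRs in the save area) and
   sav_scale = 8, so while ap.fpr < 8 the loaded address is

       ap.reg_save_area + 32 + ap.fpr++ * 8

   and once the eight FPRs are used up the value comes from the
   8-byte-aligned overflow area instead.  */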
14144 /* Builtins. */
14146 static void
14147 def_builtin (const char *name, tree type, enum rs6000_builtins code)
14149 tree t;
14150 unsigned classify = rs6000_builtin_info[(int)code].attr;
14151 const char *attr_string = "";
14153 gcc_assert (name != NULL);
14154 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
14156 if (rs6000_builtin_decls[(int)code])
14157 fatal_error (input_location,
14158 "internal error: builtin function %s already processed", name);
14160 rs6000_builtin_decls[(int)code] = t =
14161 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
14163 /* Set any special attributes. */
14164 if ((classify & RS6000_BTC_CONST) != 0)
14166 /* const function, function only depends on the inputs. */
14167 TREE_READONLY (t) = 1;
14168 TREE_NOTHROW (t) = 1;
14169 attr_string = ", const";
14171 else if ((classify & RS6000_BTC_PURE) != 0)
14173 /* pure function, function can read global memory, but does not set any
14174 external state. */
14175 DECL_PURE_P (t) = 1;
14176 TREE_NOTHROW (t) = 1;
14177 attr_string = ", pure";
14179 else if ((classify & RS6000_BTC_FP) != 0)
14181 /* Function is a math function. If rounding mode is on, then treat the
14182 function as not reading global memory, but it can have arbitrary side
14183 effects. If it is off, then assume the function is a const function.
14184 This mimics the ATTR_MATHFN_FPROUNDING attribute in
14185 builtin-attribute.def that is used for the math functions. */
14186 TREE_NOTHROW (t) = 1;
14187 if (flag_rounding_math)
14189 DECL_PURE_P (t) = 1;
14190 DECL_IS_NOVOPS (t) = 1;
14191 attr_string = ", fp, pure";
14193 else
14195 TREE_READONLY (t) = 1;
14196 attr_string = ", fp, const";
14199 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
14200 gcc_unreachable ();
14202 if (TARGET_DEBUG_BUILTIN)
14203 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
14204 (int)code, name, attr_string);
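/* Usage sketch (the type node name here is illustrative): during target
   initialization a vector builtin is registered along the lines of

       def_builtin ("__builtin_altivec_vmaddfp", v4sf_ftype_v4sf_v4sf_v4sf,
                    ALTIVEC_BUILTIN_VMADDFP);

   after which rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP] holds the
   FUNCTION_DECL and the RS6000_BTC_* bits in the builtin's attr field
   determine whether it is marked const, pure or FP above.  */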
14207 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
14209 #undef RS6000_BUILTIN_0
14210 #undef RS6000_BUILTIN_1
14211 #undef RS6000_BUILTIN_2
14212 #undef RS6000_BUILTIN_3
14213 #undef RS6000_BUILTIN_A
14214 #undef RS6000_BUILTIN_D
14215 #undef RS6000_BUILTIN_E
14216 #undef RS6000_BUILTIN_H
14217 #undef RS6000_BUILTIN_P
14218 #undef RS6000_BUILTIN_Q
14219 #undef RS6000_BUILTIN_S
14220 #undef RS6000_BUILTIN_X
14222 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14223 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14224 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14225 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
14226 { MASK, ICODE, NAME, ENUM },
14228 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14229 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14230 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14231 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14232 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14233 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14234 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14235 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14237 static const struct builtin_description bdesc_3arg[] =
14239 #include "rs6000-builtin.def"
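/* How the X-macro scheme above works (argument values illustrative):
   an entry in rs6000-builtin.def (written via the BU_* wrapper macros)
   such as

       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
                         "__builtin_altivec_vmaddfp",
                         RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
                         CODE_FOR_fmav4sf4)

   expands under the definitions just above to the initializer
   { MASK, ICODE, NAME, ENUM }, while every other RS6000_BUILTIN_* line
   expands to nothing; re-including the same .def file with different
   macro definitions thus selects exactly one class of builtins for each
   bdesc_* table in this file.  */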
14242 /* DST operations: void foo (void *, const int, const char). */
14244 #undef RS6000_BUILTIN_0
14245 #undef RS6000_BUILTIN_1
14246 #undef RS6000_BUILTIN_2
14247 #undef RS6000_BUILTIN_3
14248 #undef RS6000_BUILTIN_A
14249 #undef RS6000_BUILTIN_D
14250 #undef RS6000_BUILTIN_E
14251 #undef RS6000_BUILTIN_H
14252 #undef RS6000_BUILTIN_P
14253 #undef RS6000_BUILTIN_Q
14254 #undef RS6000_BUILTIN_S
14255 #undef RS6000_BUILTIN_X
14257 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14258 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14259 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14260 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14261 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14262 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
14263 { MASK, ICODE, NAME, ENUM },
14265 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14266 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14267 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14268 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14269 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14270 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14272 static const struct builtin_description bdesc_dst[] =
14274 #include "rs6000-builtin.def"
14277 /* Simple binary operations: VECc = foo (VECa, VECb). */
14279 #undef RS6000_BUILTIN_0
14280 #undef RS6000_BUILTIN_1
14281 #undef RS6000_BUILTIN_2
14282 #undef RS6000_BUILTIN_3
14283 #undef RS6000_BUILTIN_A
14284 #undef RS6000_BUILTIN_D
14285 #undef RS6000_BUILTIN_E
14286 #undef RS6000_BUILTIN_H
14287 #undef RS6000_BUILTIN_P
14288 #undef RS6000_BUILTIN_Q
14289 #undef RS6000_BUILTIN_S
14290 #undef RS6000_BUILTIN_X
14292 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14293 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14294 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
14295 { MASK, ICODE, NAME, ENUM },
14297 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14298 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14299 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14300 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14301 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14302 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14303 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14304 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14305 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14307 static const struct builtin_description bdesc_2arg[] =
14309 #include "rs6000-builtin.def"
14312 #undef RS6000_BUILTIN_0
14313 #undef RS6000_BUILTIN_1
14314 #undef RS6000_BUILTIN_2
14315 #undef RS6000_BUILTIN_3
14316 #undef RS6000_BUILTIN_A
14317 #undef RS6000_BUILTIN_D
14318 #undef RS6000_BUILTIN_E
14319 #undef RS6000_BUILTIN_H
14320 #undef RS6000_BUILTIN_P
14321 #undef RS6000_BUILTIN_Q
14322 #undef RS6000_BUILTIN_S
14323 #undef RS6000_BUILTIN_X
14325 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14326 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14327 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14328 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14329 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14330 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14331 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14332 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14333 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
14334 { MASK, ICODE, NAME, ENUM },
14336 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14337 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14338 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14340 /* AltiVec predicates. */
14342 static const struct builtin_description bdesc_altivec_preds[] =
14344 #include "rs6000-builtin.def"
14347 /* SPE predicates. */
14348 #undef RS6000_BUILTIN_0
14349 #undef RS6000_BUILTIN_1
14350 #undef RS6000_BUILTIN_2
14351 #undef RS6000_BUILTIN_3
14352 #undef RS6000_BUILTIN_A
14353 #undef RS6000_BUILTIN_D
14354 #undef RS6000_BUILTIN_E
14355 #undef RS6000_BUILTIN_H
14356 #undef RS6000_BUILTIN_P
14357 #undef RS6000_BUILTIN_Q
14358 #undef RS6000_BUILTIN_S
14359 #undef RS6000_BUILTIN_X
14361 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14362 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14363 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14364 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14365 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14366 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14367 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14368 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14369 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14370 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14371 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
14372 { MASK, ICODE, NAME, ENUM },
14374 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14376 static const struct builtin_description bdesc_spe_predicates[] =
14378 #include "rs6000-builtin.def"
14381 /* SPE evsel predicates. */
14382 #undef RS6000_BUILTIN_0
14383 #undef RS6000_BUILTIN_1
14384 #undef RS6000_BUILTIN_2
14385 #undef RS6000_BUILTIN_3
14386 #undef RS6000_BUILTIN_A
14387 #undef RS6000_BUILTIN_D
14388 #undef RS6000_BUILTIN_E
14389 #undef RS6000_BUILTIN_H
14390 #undef RS6000_BUILTIN_P
14391 #undef RS6000_BUILTIN_Q
14392 #undef RS6000_BUILTIN_S
14393 #undef RS6000_BUILTIN_X
14395 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14396 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14397 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14398 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14399 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14400 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14401 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
14402 { MASK, ICODE, NAME, ENUM },
14404 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14405 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14406 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14407 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14408 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14410 static const struct builtin_description bdesc_spe_evsel[] =
14412 #include "rs6000-builtin.def"
14415 /* PAIRED predicates. */
14416 #undef RS6000_BUILTIN_0
14417 #undef RS6000_BUILTIN_1
14418 #undef RS6000_BUILTIN_2
14419 #undef RS6000_BUILTIN_3
14420 #undef RS6000_BUILTIN_A
14421 #undef RS6000_BUILTIN_D
14422 #undef RS6000_BUILTIN_E
14423 #undef RS6000_BUILTIN_H
14424 #undef RS6000_BUILTIN_P
14425 #undef RS6000_BUILTIN_Q
14426 #undef RS6000_BUILTIN_S
14427 #undef RS6000_BUILTIN_X
14429 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14430 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14431 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14432 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14433 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14434 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14435 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14436 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14437 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14438 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
14439 { MASK, ICODE, NAME, ENUM },
14441 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14442 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14444 static const struct builtin_description bdesc_paired_preds[] =
14445 {
14446 #include "rs6000-builtin.def"
14447 };
14449 /* ABS* operations. */
14451 #undef RS6000_BUILTIN_0
14452 #undef RS6000_BUILTIN_1
14453 #undef RS6000_BUILTIN_2
14454 #undef RS6000_BUILTIN_3
14455 #undef RS6000_BUILTIN_A
14456 #undef RS6000_BUILTIN_D
14457 #undef RS6000_BUILTIN_E
14458 #undef RS6000_BUILTIN_H
14459 #undef RS6000_BUILTIN_P
14460 #undef RS6000_BUILTIN_Q
14461 #undef RS6000_BUILTIN_S
14462 #undef RS6000_BUILTIN_X
14464 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14465 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14466 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14467 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14468 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
14469 { MASK, ICODE, NAME, ENUM },
14471 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14472 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14473 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14474 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14475 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14476 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14477 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14479 static const struct builtin_description bdesc_abs[] =
14480 {
14481 #include "rs6000-builtin.def"
14482 };
14484 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
14485 foo (VECa). */
14487 #undef RS6000_BUILTIN_0
14488 #undef RS6000_BUILTIN_1
14489 #undef RS6000_BUILTIN_2
14490 #undef RS6000_BUILTIN_3
14491 #undef RS6000_BUILTIN_A
14492 #undef RS6000_BUILTIN_D
14493 #undef RS6000_BUILTIN_E
14494 #undef RS6000_BUILTIN_H
14495 #undef RS6000_BUILTIN_P
14496 #undef RS6000_BUILTIN_Q
14497 #undef RS6000_BUILTIN_S
14498 #undef RS6000_BUILTIN_X
14500 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14501 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
14502 { MASK, ICODE, NAME, ENUM },
14504 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14505 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14506 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14507 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14508 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14509 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14510 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14511 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14512 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14513 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14515 static const struct builtin_description bdesc_1arg[] =
14516 {
14517 #include "rs6000-builtin.def"
14518 };
14520 /* Simple no-argument operations: result = __builtin_darn_32 () */
14522 #undef RS6000_BUILTIN_0
14523 #undef RS6000_BUILTIN_1
14524 #undef RS6000_BUILTIN_2
14525 #undef RS6000_BUILTIN_3
14526 #undef RS6000_BUILTIN_A
14527 #undef RS6000_BUILTIN_D
14528 #undef RS6000_BUILTIN_E
14529 #undef RS6000_BUILTIN_H
14530 #undef RS6000_BUILTIN_P
14531 #undef RS6000_BUILTIN_Q
14532 #undef RS6000_BUILTIN_S
14533 #undef RS6000_BUILTIN_X
14535 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
14536 { MASK, ICODE, NAME, ENUM },
14538 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14539 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14540 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14541 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14542 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14543 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14544 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14545 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14546 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14547 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14548 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14550 static const struct builtin_description bdesc_0arg[] =
14551 {
14552 #include "rs6000-builtin.def"
14553 };
14555 /* HTM builtins. */
14556 #undef RS6000_BUILTIN_0
14557 #undef RS6000_BUILTIN_1
14558 #undef RS6000_BUILTIN_2
14559 #undef RS6000_BUILTIN_3
14560 #undef RS6000_BUILTIN_A
14561 #undef RS6000_BUILTIN_D
14562 #undef RS6000_BUILTIN_E
14563 #undef RS6000_BUILTIN_H
14564 #undef RS6000_BUILTIN_P
14565 #undef RS6000_BUILTIN_Q
14566 #undef RS6000_BUILTIN_S
14567 #undef RS6000_BUILTIN_X
14569 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14570 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14571 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14572 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14573 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14574 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14575 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14576 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
14577 { MASK, ICODE, NAME, ENUM },
14579 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14580 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14581 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14582 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14584 static const struct builtin_description bdesc_htm[] =
14585 {
14586 #include "rs6000-builtin.def"
14587 };
14589 #undef RS6000_BUILTIN_0
14590 #undef RS6000_BUILTIN_1
14591 #undef RS6000_BUILTIN_2
14592 #undef RS6000_BUILTIN_3
14593 #undef RS6000_BUILTIN_A
14594 #undef RS6000_BUILTIN_D
14595 #undef RS6000_BUILTIN_E
14596 #undef RS6000_BUILTIN_H
14597 #undef RS6000_BUILTIN_P
14598 #undef RS6000_BUILTIN_Q
14599 #undef RS6000_BUILTIN_S
14601 /* Return true if a builtin function is overloaded. */
14602 bool
14603 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
14604 {
14605 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
14606 }
14608 const char *
14609 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
14610 {
14611 return rs6000_builtin_info[(int)fncode].name;
14612 }
14614 /* Expand an expression EXP that calls a builtin without arguments. */
14615 static rtx
14616 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
14618 rtx pat;
14619 machine_mode tmode = insn_data[icode].operand[0].mode;
14621 if (icode == CODE_FOR_nothing)
14622 /* Builtin not supported on this processor. */
14623 return 0;
14625 if (target == 0
14626 || GET_MODE (target) != tmode
14627 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14628 target = gen_reg_rtx (tmode);
14630 pat = GEN_FCN (icode) (target);
14631 if (! pat)
14632 return 0;
14633 emit_insn (pat);
14635 return target;
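/* Usage sketch for the zero-operand expander above: __builtin_darn_32
   (the POWER9 random-number builtin named in the bdesc_0arg comment
   above) takes no arguments, so only the target operand is validated.
   Illustrative only.  */
#if 0
static int
get_random_int (void)
{
  return __builtin_darn_32 ();	/* no input operands; target only */
}
#endif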
14639 static rtx
14640 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
14642 rtx pat;
14643 tree arg0 = CALL_EXPR_ARG (exp, 0);
14644 tree arg1 = CALL_EXPR_ARG (exp, 1);
14645 rtx op0 = expand_normal (arg0);
14646 rtx op1 = expand_normal (arg1);
14647 machine_mode mode0 = insn_data[icode].operand[0].mode;
14648 machine_mode mode1 = insn_data[icode].operand[1].mode;
14650 if (icode == CODE_FOR_nothing)
14651 /* Builtin not supported on this processor. */
14652 return 0;
14654 /* If we got invalid arguments, bail out before generating bad rtl. */
14655 if (arg0 == error_mark_node || arg1 == error_mark_node)
14656 return const0_rtx;
14658 if (GET_CODE (op0) != CONST_INT
14659 || INTVAL (op0) > 255
14660 || INTVAL (op0) < 0)
14662 error ("argument 1 must be an 8-bit field value");
14663 return const0_rtx;
14666 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14667 op0 = copy_to_mode_reg (mode0, op0);
14669 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14670 op1 = copy_to_mode_reg (mode1, op1);
14672 pat = GEN_FCN (icode) (op0, op1);
14673 if (! pat)
14674 return const0_rtx;
14675 emit_insn (pat);
14677 return NULL_RTX;
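/* Usage sketch for the expander above: the first argument is the FPSCR
   field mask, which the check above requires to be a literal in the
   range 0..255.  Illustrative only.  */
#if 0
static void
set_fpscr_fields (double new_bits)
{
  __builtin_mtfsf (0xff, new_bits);	/* mask 0xff selects all eight fields */
}
#endif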
14680 static rtx
14681 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
14683 rtx pat;
14684 tree arg0 = CALL_EXPR_ARG (exp, 0);
14685 rtx op0 = expand_normal (arg0);
14686 machine_mode tmode = insn_data[icode].operand[0].mode;
14687 machine_mode mode0 = insn_data[icode].operand[1].mode;
14689 if (icode == CODE_FOR_nothing)
14690 /* Builtin not supported on this processor. */
14691 return 0;
14693 /* If we got invalid arguments, bail out before generating bad rtl. */
14694 if (arg0 == error_mark_node)
14695 return const0_rtx;
14697 if (icode == CODE_FOR_altivec_vspltisb
14698 || icode == CODE_FOR_altivec_vspltish
14699 || icode == CODE_FOR_altivec_vspltisw
14700 || icode == CODE_FOR_spe_evsplatfi
14701 || icode == CODE_FOR_spe_evsplati)
14703 /* Only allow 5-bit *signed* literals. */
14704 if (GET_CODE (op0) != CONST_INT
14705 || INTVAL (op0) > 15
14706 || INTVAL (op0) < -16)
14708 error ("argument 1 must be a 5-bit signed literal");
14709 return CONST0_RTX (tmode);
14713 if (target == 0
14714 || GET_MODE (target) != tmode
14715 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14716 target = gen_reg_rtx (tmode);
14718 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14719 op0 = copy_to_mode_reg (mode0, op0);
14721 pat = GEN_FCN (icode) (target, op0);
14722 if (! pat)
14723 return 0;
14724 emit_insn (pat);
14726 return target;
14729 static rtx
14730 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14732 rtx pat, scratch1, scratch2;
14733 tree arg0 = CALL_EXPR_ARG (exp, 0);
14734 rtx op0 = expand_normal (arg0);
14735 machine_mode tmode = insn_data[icode].operand[0].mode;
14736 machine_mode mode0 = insn_data[icode].operand[1].mode;
14738 /* If we have invalid arguments, bail out before generating bad rtl. */
14739 if (arg0 == error_mark_node)
14740 return const0_rtx;
14742 if (target == 0
14743 || GET_MODE (target) != tmode
14744 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14745 target = gen_reg_rtx (tmode);
14747 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14748 op0 = copy_to_mode_reg (mode0, op0);
14750 scratch1 = gen_reg_rtx (mode0);
14751 scratch2 = gen_reg_rtx (mode0);
14753 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14754 if (! pat)
14755 return 0;
14756 emit_insn (pat);
14758 return target;
14761 static rtx
14762 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14764 rtx pat;
14765 tree arg0 = CALL_EXPR_ARG (exp, 0);
14766 tree arg1 = CALL_EXPR_ARG (exp, 1);
14767 rtx op0 = expand_normal (arg0);
14768 rtx op1 = expand_normal (arg1);
14769 machine_mode tmode = insn_data[icode].operand[0].mode;
14770 machine_mode mode0 = insn_data[icode].operand[1].mode;
14771 machine_mode mode1 = insn_data[icode].operand[2].mode;
14773 if (icode == CODE_FOR_nothing)
14774 /* Builtin not supported on this processor. */
14775 return 0;
14777 /* If we got invalid arguments, bail out before generating bad rtl. */
14778 if (arg0 == error_mark_node || arg1 == error_mark_node)
14779 return const0_rtx;
14781 if (icode == CODE_FOR_altivec_vcfux
14782 || icode == CODE_FOR_altivec_vcfsx
14783 || icode == CODE_FOR_altivec_vctsxs
14784 || icode == CODE_FOR_altivec_vctuxs
14785 || icode == CODE_FOR_altivec_vspltb
14786 || icode == CODE_FOR_altivec_vsplth
14787 || icode == CODE_FOR_altivec_vspltw
14788 || icode == CODE_FOR_spe_evaddiw
14789 || icode == CODE_FOR_spe_evldd
14790 || icode == CODE_FOR_spe_evldh
14791 || icode == CODE_FOR_spe_evldw
14792 || icode == CODE_FOR_spe_evlhhesplat
14793 || icode == CODE_FOR_spe_evlhhossplat
14794 || icode == CODE_FOR_spe_evlhhousplat
14795 || icode == CODE_FOR_spe_evlwhe
14796 || icode == CODE_FOR_spe_evlwhos
14797 || icode == CODE_FOR_spe_evlwhou
14798 || icode == CODE_FOR_spe_evlwhsplat
14799 || icode == CODE_FOR_spe_evlwwsplat
14800 || icode == CODE_FOR_spe_evrlwi
14801 || icode == CODE_FOR_spe_evslwi
14802 || icode == CODE_FOR_spe_evsrwis
14803 || icode == CODE_FOR_spe_evsubifw
14804 || icode == CODE_FOR_spe_evsrwiu)
14806 /* Only allow 5-bit unsigned literals. */
14807 STRIP_NOPS (arg1);
14808 if (TREE_CODE (arg1) != INTEGER_CST
14809 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14811 error ("argument 2 must be a 5-bit unsigned literal");
14812 return CONST0_RTX (tmode);
14815 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14816 || icode == CODE_FOR_dfptstsfi_lt_dd
14817 || icode == CODE_FOR_dfptstsfi_gt_dd
14818 || icode == CODE_FOR_dfptstsfi_unordered_dd
14819 || icode == CODE_FOR_dfptstsfi_eq_td
14820 || icode == CODE_FOR_dfptstsfi_lt_td
14821 || icode == CODE_FOR_dfptstsfi_gt_td
14822 || icode == CODE_FOR_dfptstsfi_unordered_td)
14824 /* Only allow 6-bit unsigned literals. */
14825 STRIP_NOPS (arg0);
14826 if (TREE_CODE (arg0) != INTEGER_CST
14827 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14829 error ("argument 1 must be a 6-bit unsigned literal");
14830 return CONST0_RTX (tmode);
14833 else if (icode == CODE_FOR_xststdcdp
14834 || icode == CODE_FOR_xststdcsp
14835 || icode == CODE_FOR_xvtstdcdp
14836 || icode == CODE_FOR_xvtstdcsp)
14838 /* Only allow 7-bit unsigned literals. */
14839 STRIP_NOPS (arg1);
14840 if (TREE_CODE (arg1) != INTEGER_CST
14841 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14843 error ("argument 2 must be a 7-bit unsigned literal");
14844 return CONST0_RTX (tmode);
14848 if (target == 0
14849 || GET_MODE (target) != tmode
14850 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14851 target = gen_reg_rtx (tmode);
14853 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14854 op0 = copy_to_mode_reg (mode0, op0);
14855 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14856 op1 = copy_to_mode_reg (mode1, op1);
14858 pat = GEN_FCN (icode) (target, op0, op1);
14859 if (! pat)
14860 return 0;
14861 emit_insn (pat);
14863 return target;
14866 static rtx
14867 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14869 rtx pat, scratch;
14870 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14871 tree arg0 = CALL_EXPR_ARG (exp, 1);
14872 tree arg1 = CALL_EXPR_ARG (exp, 2);
14873 rtx op0 = expand_normal (arg0);
14874 rtx op1 = expand_normal (arg1);
14875 machine_mode tmode = SImode;
14876 machine_mode mode0 = insn_data[icode].operand[1].mode;
14877 machine_mode mode1 = insn_data[icode].operand[2].mode;
14878 int cr6_form_int;
14880 if (TREE_CODE (cr6_form) != INTEGER_CST)
14882 error ("argument 1 of __builtin_altivec_predicate must be a constant");
14883 return const0_rtx;
14885 else
14886 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14888 gcc_assert (mode0 == mode1);
14890 /* If we have invalid arguments, bail out before generating bad rtl. */
14891 if (arg0 == error_mark_node || arg1 == error_mark_node)
14892 return const0_rtx;
14894 if (target == 0
14895 || GET_MODE (target) != tmode
14896 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14897 target = gen_reg_rtx (tmode);
14899 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14900 op0 = copy_to_mode_reg (mode0, op0);
14901 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14902 op1 = copy_to_mode_reg (mode1, op1);
14904 /* Note that for many of the relevant operations (e.g. cmpne or
14905 cmpeq) with float or double operands, it would make more sense
14906 for the mode of the allocated scratch register to be a vector of
14907 integers. But the choice to copy the mode of operand 0 was made
14908 long ago and there are no plans to change it. */
14909 scratch = gen_reg_rtx (mode0);
14911 pat = GEN_FCN (icode) (scratch, op0, op1);
14912 if (! pat)
14913 return 0;
14914 emit_insn (pat);
14916 /* The vec_any* and vec_all* predicates use the same opcodes for two
14917 different operations, but the bits in CR6 will be different
14918 depending on what information we want. So we have to play tricks
14919 with CR6 to get the right bits out.
14921 If you think this is disgusting, look at the specs for the
14922 AltiVec predicates. */
14924 switch (cr6_form_int)
14926 case 0:
14927 emit_insn (gen_cr6_test_for_zero (target));
14928 break;
14929 case 1:
14930 emit_insn (gen_cr6_test_for_zero_reverse (target));
14931 break;
14932 case 2:
14933 emit_insn (gen_cr6_test_for_lt (target));
14934 break;
14935 case 3:
14936 emit_insn (gen_cr6_test_for_lt_reverse (target));
14937 break;
14938 default:
14939 error ("argument 1 of __builtin_altivec_predicate is out of range");
14940 break;
14943 return target;
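/* Worked example of the CR6 trick above, assuming the __CR6_* encodings
   used by altivec.h (0 = EQ, 1 = EQ reversed, 2 = LT, 3 = LT reversed):
   vcmpequw. sets CR6[LT] when *all* element comparisons are true and
   CR6[EQ] when *none* are.  Both vec_all_eq and vec_any_eq therefore
   emit the same compare and differ only in cr6_form:

     vec_all_eq (a, b)  ->  compare, then form 2: test CR6[LT]
     vec_any_eq (a, b)  ->  compare, then form 1: test ~CR6[EQ]
                                     ("not none" == "at least one")  */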
14946 static rtx
14947 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14949 rtx pat, addr;
14950 tree arg0 = CALL_EXPR_ARG (exp, 0);
14951 tree arg1 = CALL_EXPR_ARG (exp, 1);
14952 machine_mode tmode = insn_data[icode].operand[0].mode;
14953 machine_mode mode0 = Pmode;
14954 machine_mode mode1 = Pmode;
14955 rtx op0 = expand_normal (arg0);
14956 rtx op1 = expand_normal (arg1);
14958 if (icode == CODE_FOR_nothing)
14959 /* Builtin not supported on this processor. */
14960 return 0;
14962 /* If we got invalid arguments, bail out before generating bad rtl. */
14963 if (arg0 == error_mark_node || arg1 == error_mark_node)
14964 return const0_rtx;
14966 if (target == 0
14967 || GET_MODE (target) != tmode
14968 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14969 target = gen_reg_rtx (tmode);
14971 op1 = copy_to_mode_reg (mode1, op1);
14973 if (op0 == const0_rtx)
14975 addr = gen_rtx_MEM (tmode, op1);
14977 else
14979 op0 = copy_to_mode_reg (mode0, op0);
14980 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14983 pat = GEN_FCN (icode) (target, addr);
14985 if (! pat)
14986 return 0;
14987 emit_insn (pat);
14989 return target;
14992 /* Return a constant vector for use as a little-endian permute control vector
14993 to reverse the order of elements of the given vector mode. */
14994 static rtx
14995 swap_selector_for_mode (machine_mode mode)
14997 /* These are little endian vectors, so their elements are reversed
14998 from what you would normally expect for a permute control vector. */
14999 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
15000 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
15001 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
15002 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
15003 unsigned int *swaparray, i;
15004 rtx perm[16];
15006 switch (mode)
15008 case V2DFmode:
15009 case V2DImode:
15010 swaparray = swap2;
15011 break;
15012 case V4SFmode:
15013 case V4SImode:
15014 swaparray = swap4;
15015 break;
15016 case V8HImode:
15017 swaparray = swap8;
15018 break;
15019 case V16QImode:
15020 swaparray = swap16;
15021 break;
15022 default:
15023 gcc_unreachable ();
15026 for (i = 0; i < 16; ++i)
15027 perm[i] = GEN_INT (swaparray[i]);
15029 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
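/* Worked example for the tables above: in conventional (big-endian)
   vperm numbering, reversing the two doublewords of a V2DImode value
   takes control bytes { 8,9,...,15, 0,1,...,7 }.  Because the selector
   is constructed here as a little-endian vector, that control vector is
   stored fully byte-reversed, giving swap2 = { 7,6,...,0, 15,14,...,8 };
   swap4 and swap8 follow the same rule for words and halfwords, and the
   double reversal for byte elements cancels, leaving swap16 as the
   identity pattern { 0,...,15 }.  */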
15032 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
15033 with -maltivec=be specified. Issue the load followed by an element-
15034 reversing permute. */
15035 void
15036 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15038 rtx tmp = gen_reg_rtx (mode);
15039 rtx load = gen_rtx_SET (tmp, op1);
15040 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
15041 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
15042 rtx sel = swap_selector_for_mode (mode);
15043 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
15045 gcc_assert (REG_P (op0));
15046 emit_insn (par);
15047 emit_insn (gen_rtx_SET (op0, vperm));
15050 /* Generate code for a "stvxl" built-in for a little endian target with
15051 -maltivec=be specified. Issue the store preceded by an element-reversing
15052 permute. */
15053 void
15054 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15056 rtx tmp = gen_reg_rtx (mode);
15057 rtx store = gen_rtx_SET (op0, tmp);
15058 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
15059 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
15060 rtx sel = swap_selector_for_mode (mode);
15061 rtx vperm;
15063 gcc_assert (REG_P (op1));
15064 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
15065 emit_insn (gen_rtx_SET (tmp, vperm));
15066 emit_insn (par);
15069 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
15070 specified. Issue the store preceded by an element-reversing permute. */
15071 void
15072 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15074 machine_mode inner_mode = GET_MODE_INNER (mode);
15075 rtx tmp = gen_reg_rtx (mode);
15076 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
15077 rtx sel = swap_selector_for_mode (mode);
15078 rtx vperm;
15080 gcc_assert (REG_P (op1));
15081 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
15082 emit_insn (gen_rtx_SET (tmp, vperm));
15083 emit_insn (gen_rtx_SET (op0, stvx));
15086 static rtx
15087 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
15089 rtx pat, addr;
15090 tree arg0 = CALL_EXPR_ARG (exp, 0);
15091 tree arg1 = CALL_EXPR_ARG (exp, 1);
15092 machine_mode tmode = insn_data[icode].operand[0].mode;
15093 machine_mode mode0 = Pmode;
15094 machine_mode mode1 = Pmode;
15095 rtx op0 = expand_normal (arg0);
15096 rtx op1 = expand_normal (arg1);
15098 if (icode == CODE_FOR_nothing)
15099 /* Builtin not supported on this processor. */
15100 return 0;
15102 /* If we got invalid arguments, bail out before generating bad rtl. */
15103 if (arg0 == error_mark_node || arg1 == error_mark_node)
15104 return const0_rtx;
15106 if (target == 0
15107 || GET_MODE (target) != tmode
15108 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15109 target = gen_reg_rtx (tmode);
15111 op1 = copy_to_mode_reg (mode1, op1);
15113 /* For LVX, express the RTL accurately by ANDing the address with -16.
15114 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
15115 so the raw address is fine. */
15116 if (icode == CODE_FOR_altivec_lvx_v2df_2op
15117 || icode == CODE_FOR_altivec_lvx_v2di_2op
15118 || icode == CODE_FOR_altivec_lvx_v4sf_2op
15119 || icode == CODE_FOR_altivec_lvx_v4si_2op
15120 || icode == CODE_FOR_altivec_lvx_v8hi_2op
15121 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
15123 rtx rawaddr;
15124 if (op0 == const0_rtx)
15125 rawaddr = op1;
15126 else
15128 op0 = copy_to_mode_reg (mode0, op0);
15129 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
15131 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
15132 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
15134 /* For -maltivec=be, emit the load and follow it up with a
15135 permute to swap the elements. */
15136 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
15138 rtx temp = gen_reg_rtx (tmode);
15139 emit_insn (gen_rtx_SET (temp, addr));
15141 rtx sel = swap_selector_for_mode (tmode);
15142 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
15143 UNSPEC_VPERM);
15144 emit_insn (gen_rtx_SET (target, vperm));
15146 else
15147 emit_insn (gen_rtx_SET (target, addr));
15149 else
15151 if (op0 == const0_rtx)
15152 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
15153 else
15155 op0 = copy_to_mode_reg (mode0, op0);
15156 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
15157 gen_rtx_PLUS (Pmode, op1, op0));
15160 pat = GEN_FCN (icode) (target, addr);
15161 if (! pat)
15162 return 0;
15163 emit_insn (pat);
15166 return target;
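/* A note on the AND above: lvx ignores the low four bits of the
   effective address, so the RTL models the access as coming from
   (base + offset) & -16.  Equivalent pointer arithmetic as a sketch
   (hypothetical helper; assumes <stdint.h> and <stddef.h>):  */
#if 0
static const void *
lvx_effective_address (const void *base, ptrdiff_t offset)
{
  return (const void *) (((uintptr_t) base + (uintptr_t) offset)
			 & ~(uintptr_t) 15);
}
#endif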
15169 static rtx
15170 spe_expand_stv_builtin (enum insn_code icode, tree exp)
15172 tree arg0 = CALL_EXPR_ARG (exp, 0);
15173 tree arg1 = CALL_EXPR_ARG (exp, 1);
15174 tree arg2 = CALL_EXPR_ARG (exp, 2);
15175 rtx op0 = expand_normal (arg0);
15176 rtx op1 = expand_normal (arg1);
15177 rtx op2 = expand_normal (arg2);
15178 rtx pat;
15179 machine_mode mode0 = insn_data[icode].operand[0].mode;
15180 machine_mode mode1 = insn_data[icode].operand[1].mode;
15181 machine_mode mode2 = insn_data[icode].operand[2].mode;
15183 /* If we got invalid arguments, bail out before generating bad rtl. */
15184 if (arg0 == error_mark_node
15185 || arg1 == error_mark_node
15186 || arg2 == error_mark_node)
15187 return const0_rtx;
15189 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
15190 op0 = copy_to_mode_reg (mode2, op0);
15191 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
15192 op1 = copy_to_mode_reg (mode0, op1);
15193 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15194 op2 = copy_to_mode_reg (mode1, op2);
15196 pat = GEN_FCN (icode) (op1, op2, op0);
15197 if (pat)
15198 emit_insn (pat);
15199 return NULL_RTX;
15202 static rtx
15203 paired_expand_stv_builtin (enum insn_code icode, tree exp)
15205 tree arg0 = CALL_EXPR_ARG (exp, 0);
15206 tree arg1 = CALL_EXPR_ARG (exp, 1);
15207 tree arg2 = CALL_EXPR_ARG (exp, 2);
15208 rtx op0 = expand_normal (arg0);
15209 rtx op1 = expand_normal (arg1);
15210 rtx op2 = expand_normal (arg2);
15211 rtx pat, addr;
15212 machine_mode tmode = insn_data[icode].operand[0].mode;
15213 machine_mode mode1 = Pmode;
15214 machine_mode mode2 = Pmode;
15216 /* If we got invalid arguments, bail out before generating bad rtl. */
15217 if (arg0 == error_mark_node
15218 || arg1 == error_mark_node
15219 || arg2 == error_mark_node)
15220 return const0_rtx;
15222 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
15223 op0 = copy_to_mode_reg (tmode, op0);
15225 op2 = copy_to_mode_reg (mode2, op2);
15227 if (op1 == const0_rtx)
15229 addr = gen_rtx_MEM (tmode, op2);
15231 else
15233 op1 = copy_to_mode_reg (mode1, op1);
15234 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
15237 pat = GEN_FCN (icode) (addr, op0);
15238 if (pat)
15239 emit_insn (pat);
15240 return NULL_RTX;
15243 static rtx
15244 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
15246 rtx pat;
15247 tree arg0 = CALL_EXPR_ARG (exp, 0);
15248 tree arg1 = CALL_EXPR_ARG (exp, 1);
15249 tree arg2 = CALL_EXPR_ARG (exp, 2);
15250 rtx op0 = expand_normal (arg0);
15251 rtx op1 = expand_normal (arg1);
15252 rtx op2 = expand_normal (arg2);
15253 machine_mode mode0 = insn_data[icode].operand[0].mode;
15254 machine_mode mode1 = insn_data[icode].operand[1].mode;
15255 machine_mode mode2 = insn_data[icode].operand[2].mode;
15257 if (icode == CODE_FOR_nothing)
15258 /* Builtin not supported on this processor. */
15259 return NULL_RTX;
15261 /* If we got invalid arguments, bail out before generating bad rtl. */
15262 if (arg0 == error_mark_node
15263 || arg1 == error_mark_node
15264 || arg2 == error_mark_node)
15265 return NULL_RTX;
15267 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15268 op0 = copy_to_mode_reg (mode0, op0);
15269 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15270 op1 = copy_to_mode_reg (mode1, op1);
15271 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15272 op2 = copy_to_mode_reg (mode2, op2);
15274 pat = GEN_FCN (icode) (op0, op1, op2);
15275 if (pat)
15276 emit_insn (pat);
15278 return NULL_RTX;
15281 static rtx
15282 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
15284 tree arg0 = CALL_EXPR_ARG (exp, 0);
15285 tree arg1 = CALL_EXPR_ARG (exp, 1);
15286 tree arg2 = CALL_EXPR_ARG (exp, 2);
15287 rtx op0 = expand_normal (arg0);
15288 rtx op1 = expand_normal (arg1);
15289 rtx op2 = expand_normal (arg2);
15290 rtx pat, addr, rawaddr;
15291 machine_mode tmode = insn_data[icode].operand[0].mode;
15292 machine_mode smode = insn_data[icode].operand[1].mode;
15293 machine_mode mode1 = Pmode;
15294 machine_mode mode2 = Pmode;
15296 /* If we got invalid arguments, bail out before generating bad rtl. */
15297 if (arg0 == error_mark_node
15298 || arg1 == error_mark_node
15299 || arg2 == error_mark_node)
15300 return const0_rtx;
15302 op2 = copy_to_mode_reg (mode2, op2);
15304 /* For STVX, express the RTL accurately by ANDing the address with -16.
15305 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
15306 so the raw address is fine. */
15307 if (icode == CODE_FOR_altivec_stvx_v2df_2op
15308 || icode == CODE_FOR_altivec_stvx_v2di_2op
15309 || icode == CODE_FOR_altivec_stvx_v4sf_2op
15310 || icode == CODE_FOR_altivec_stvx_v4si_2op
15311 || icode == CODE_FOR_altivec_stvx_v8hi_2op
15312 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
15314 if (op1 == const0_rtx)
15315 rawaddr = op2;
15316 else
15318 op1 = copy_to_mode_reg (mode1, op1);
15319 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
15322 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
15323 addr = gen_rtx_MEM (tmode, addr);
15325 op0 = copy_to_mode_reg (tmode, op0);
15327 /* For -maltivec=be, emit a permute to swap the elements, followed
15328 by the store. */
15329 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
15331 rtx temp = gen_reg_rtx (tmode);
15332 rtx sel = swap_selector_for_mode (tmode);
15333 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
15334 UNSPEC_VPERM);
15335 emit_insn (gen_rtx_SET (temp, vperm));
15336 emit_insn (gen_rtx_SET (addr, temp));
15338 else
15339 emit_insn (gen_rtx_SET (addr, op0));
15341 else
15343 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
15344 op0 = copy_to_mode_reg (smode, op0);
15346 if (op1 == const0_rtx)
15347 addr = gen_rtx_MEM (tmode, op2);
15348 else
15350 op1 = copy_to_mode_reg (mode1, op1);
15351 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
15354 pat = GEN_FCN (icode) (addr, op0);
15355 if (pat)
15356 emit_insn (pat);
15359 return NULL_RTX;
15362 /* Return the appropriate SPR number associated with the given builtin. */
15363 static inline HOST_WIDE_INT
15364 htm_spr_num (enum rs6000_builtins code)
15366 if (code == HTM_BUILTIN_GET_TFHAR
15367 || code == HTM_BUILTIN_SET_TFHAR)
15368 return TFHAR_SPR;
15369 else if (code == HTM_BUILTIN_GET_TFIAR
15370 || code == HTM_BUILTIN_SET_TFIAR)
15371 return TFIAR_SPR;
15372 else if (code == HTM_BUILTIN_GET_TEXASR
15373 || code == HTM_BUILTIN_SET_TEXASR)
15374 return TEXASR_SPR;
15375 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
15376 || code == HTM_BUILTIN_SET_TEXASRU);
15377 return TEXASRU_SPR;
15380 /* Return the appropriate SPR regno associated with the given builtin. */
15381 static inline HOST_WIDE_INT
15382 htm_spr_regno (enum rs6000_builtins code)
15384 if (code == HTM_BUILTIN_GET_TFHAR
15385 || code == HTM_BUILTIN_SET_TFHAR)
15386 return TFHAR_REGNO;
15387 else if (code == HTM_BUILTIN_GET_TFIAR
15388 || code == HTM_BUILTIN_SET_TFIAR)
15389 return TFIAR_REGNO;
15390 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
15391 || code == HTM_BUILTIN_SET_TEXASR
15392 || code == HTM_BUILTIN_GET_TEXASRU
15393 || code == HTM_BUILTIN_SET_TEXASRU);
15394 return TEXASR_REGNO;
15397 /* Return the correct ICODE value depending on whether we are
15398 setting or reading the HTM SPRs. */
15399 static inline enum insn_code
15400 rs6000_htm_spr_icode (bool nonvoid)
15402 if (nonvoid)
15403 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
15404 else
15405 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
15408 /* Expand the HTM builtin in EXP and store the result in TARGET.
15409 Store true in *EXPANDEDP if we found a builtin to expand. */
15410 static rtx
15411 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
15413 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15414 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
15415 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15416 const struct builtin_description *d;
15417 size_t i;
15419 *expandedp = true;
15421 if (!TARGET_POWERPC64
15422 && (fcode == HTM_BUILTIN_TABORTDC
15423 || fcode == HTM_BUILTIN_TABORTDCI))
15425 size_t uns_fcode = (size_t)fcode;
15426 const char *name = rs6000_builtin_info[uns_fcode].name;
15427 error ("builtin %s is only valid in 64-bit mode", name);
15428 return const0_rtx;
15431 /* Expand the HTM builtins. */
15432 d = bdesc_htm;
15433 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
15434 if (d->code == fcode)
15436 rtx op[MAX_HTM_OPERANDS], pat;
15437 int nopnds = 0;
15438 tree arg;
15439 call_expr_arg_iterator iter;
15440 unsigned attr = rs6000_builtin_info[fcode].attr;
15441 enum insn_code icode = d->icode;
15442 const struct insn_operand_data *insn_op;
15443 bool uses_spr = (attr & RS6000_BTC_SPR);
15444 rtx cr = NULL_RTX;
15446 if (uses_spr)
15447 icode = rs6000_htm_spr_icode (nonvoid);
15448 insn_op = &insn_data[icode].operand[0];
15450 if (nonvoid)
15452 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
15453 if (!target
15454 || GET_MODE (target) != tmode
15455 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
15456 target = gen_reg_rtx (tmode);
15457 if (uses_spr)
15458 op[nopnds++] = target;
15461 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
15463 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
15464 return const0_rtx;
15466 insn_op = &insn_data[icode].operand[nopnds];
15468 op[nopnds] = expand_normal (arg);
15470 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
15472 if (!strcmp (insn_op->constraint, "n"))
15474 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
15475 if (!CONST_INT_P (op[nopnds]))
15476 error ("argument %d must be an unsigned literal", arg_num);
15477 else
15478 error ("argument %d is an unsigned literal that is "
15479 "out of range", arg_num);
15480 return const0_rtx;
15482 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
15485 nopnds++;
15488 /* Handle the builtins for extended mnemonics. These accept
15489 no arguments, but map to builtins that take arguments. */
15490 switch (fcode)
15492 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
15493 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
15494 op[nopnds++] = GEN_INT (1);
15495 if (flag_checking)
15496 attr |= RS6000_BTC_UNARY;
15497 break;
15498 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
15499 op[nopnds++] = GEN_INT (0);
15500 if (flag_checking)
15501 attr |= RS6000_BTC_UNARY;
15502 break;
15503 default:
15504 break;
15507 /* If this builtin accesses SPRs, then pass in the appropriate
15508 SPR number and SPR regno as the last two operands. */
15509 if (uses_spr)
15511 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
15512 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
15513 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
15515 /* If this builtin accesses a CR, then pass in a scratch
15516 CR as the last operand. */
15517 else if (attr & RS6000_BTC_CR)
15518 { cr = gen_reg_rtx (CCmode);
15519 op[nopnds++] = cr;
15522 if (flag_checking)
15524 int expected_nopnds = 0;
15525 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
15526 expected_nopnds = 1;
15527 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
15528 expected_nopnds = 2;
15529 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
15530 expected_nopnds = 3;
15531 if (!(attr & RS6000_BTC_VOID))
15532 expected_nopnds += 1;
15533 if (uses_spr)
15534 expected_nopnds += 2;
15536 gcc_assert (nopnds == expected_nopnds
15537 && nopnds <= MAX_HTM_OPERANDS);
15540 switch (nopnds)
15542 case 1:
15543 pat = GEN_FCN (icode) (op[0]);
15544 break;
15545 case 2:
15546 pat = GEN_FCN (icode) (op[0], op[1]);
15547 break;
15548 case 3:
15549 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
15550 break;
15551 case 4:
15552 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
15553 break;
15554 default:
15555 gcc_unreachable ();
15557 if (!pat)
15558 return NULL_RTX;
15559 emit_insn (pat);
15561 if (attr & RS6000_BTC_CR)
15563 if (fcode == HTM_BUILTIN_TBEGIN)
15565 /* Emit code to set TARGET to true or false depending on
15566 whether the tbegin. instruction succeeded or failed
15567 to start a transaction. We do this by placing the 1's
15568 complement of CR's EQ bit into TARGET. */
15569 rtx scratch = gen_reg_rtx (SImode);
15570 emit_insn (gen_rtx_SET (scratch,
15571 gen_rtx_EQ (SImode, cr,
15572 const0_rtx)));
15573 emit_insn (gen_rtx_SET (target,
15574 gen_rtx_XOR (SImode, scratch,
15575 GEN_INT (1))));
15577 else
15579 /* Emit code to copy the 4-bit condition register field
15580 CR into the least significant end of register TARGET. */
15581 rtx scratch1 = gen_reg_rtx (SImode);
15582 rtx scratch2 = gen_reg_rtx (SImode);
15583 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
15584 emit_insn (gen_movcc (subreg, cr));
15585 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
15586 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
15590 if (nonvoid)
15591 return target;
15592 return const0_rtx;
15595 *expandedp = false;
15596 return NULL_RTX;
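/* Usage sketch for the tbegin path above: because TARGET receives the
   ones' complement of the EQ bit, __builtin_tbegin yields nonzero when
   a transaction starts and zero when it does not, so the canonical use
   is (illustrative only):  */
#if 0
if (__builtin_tbegin (0))
  {
    /* Transactional path.  */
    __builtin_tend (0);
  }
else
  {
    /* Fallback path: the transaction failed to start or aborted.  */
  }
#endif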
15599 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
15601 static rtx
15602 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
15603 rtx target)
15605 /* __builtin_cpu_init () is a nop, so expand to nothing. */
15606 if (fcode == RS6000_BUILTIN_CPU_INIT)
15607 return const0_rtx;
15609 if (target == 0 || GET_MODE (target) != SImode)
15610 target = gen_reg_rtx (SImode);
15612 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
15613 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
15614 if (TREE_CODE (arg) != STRING_CST)
15616 error ("builtin %s only accepts a string argument",
15617 rs6000_builtin_info[(size_t) fcode].name);
15618 return const0_rtx;
15621 if (fcode == RS6000_BUILTIN_CPU_IS)
15623 const char *cpu = TREE_STRING_POINTER (arg);
15624 rtx cpuid = NULL_RTX;
15625 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
15626 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
15628 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
15629 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
15630 break;
15632 if (cpuid == NULL_RTX)
15634 /* Invalid CPU argument. */
15635 error ("cpu %s is an invalid argument to builtin %s",
15636 cpu, rs6000_builtin_info[(size_t) fcode].name);
15637 return const0_rtx;
15640 rtx platform = gen_reg_rtx (SImode);
15641 rtx tcbmem = gen_const_mem (SImode,
15642 gen_rtx_PLUS (Pmode,
15643 gen_rtx_REG (Pmode, TLS_REGNUM),
15644 GEN_INT (TCB_PLATFORM_OFFSET)));
15645 emit_move_insn (platform, tcbmem);
15646 emit_insn (gen_eqsi3 (target, platform, cpuid));
15648 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
15650 const char *hwcap = TREE_STRING_POINTER (arg);
15651 rtx mask = NULL_RTX;
15652 int hwcap_offset;
15653 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
15654 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
15656 mask = GEN_INT (cpu_supports_info[i].mask);
15657 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
15658 break;
15660 if (mask == NULL_RTX)
15662 /* Invalid HWCAP argument. */
15663 error ("hwcap %s is an invalid argument to builtin %s",
15664 hwcap, rs6000_builtin_info[(size_t) fcode].name);
15665 return const0_rtx;
15668 rtx tcb_hwcap = gen_reg_rtx (SImode);
15669 rtx tcbmem = gen_const_mem (SImode,
15670 gen_rtx_PLUS (Pmode,
15671 gen_rtx_REG (Pmode, TLS_REGNUM),
15672 GEN_INT (hwcap_offset)));
15673 emit_move_insn (tcb_hwcap, tcbmem);
15674 rtx scratch1 = gen_reg_rtx (SImode);
15675 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
15676 rtx scratch2 = gen_reg_rtx (SImode);
15677 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15678 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15681 /* Record that we have expanded a CPU builtin, so that we can later
15682 emit a reference to the special symbol exported by LIBC to ensure we
15683 do not link against an old LIBC that doesn't support this feature. */
15684 cpu_builtin_p = true;
15686 #else
15687 /* For old LIBCs, always return FALSE. */
15688 emit_move_insn (target, GEN_INT (0));
15689 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15691 return target;
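/* Usage sketch for the expanders above; both builtins read fields that
   GLIBC exports in the TCB, hence the TLS_REGNUM-relative loads.  The
   helper names are hypothetical; illustrative only.  */
#if 0
extern void use_power9_path (void);
extern void use_vsx_path (void);
extern void use_generic_path (void);

void
pick_implementation (void)
{
  if (__builtin_cpu_is ("power9"))
    use_power9_path ();
  else if (__builtin_cpu_supports ("vsx"))
    use_vsx_path ();
  else
    use_generic_path ();
}
#endif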
15694 static rtx
15695 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15697 rtx pat;
15698 tree arg0 = CALL_EXPR_ARG (exp, 0);
15699 tree arg1 = CALL_EXPR_ARG (exp, 1);
15700 tree arg2 = CALL_EXPR_ARG (exp, 2);
15701 rtx op0 = expand_normal (arg0);
15702 rtx op1 = expand_normal (arg1);
15703 rtx op2 = expand_normal (arg2);
15704 machine_mode tmode = insn_data[icode].operand[0].mode;
15705 machine_mode mode0 = insn_data[icode].operand[1].mode;
15706 machine_mode mode1 = insn_data[icode].operand[2].mode;
15707 machine_mode mode2 = insn_data[icode].operand[3].mode;
15709 if (icode == CODE_FOR_nothing)
15710 /* Builtin not supported on this processor. */
15711 return 0;
15713 /* If we got invalid arguments, bail out before generating bad rtl. */
15714 if (arg0 == error_mark_node
15715 || arg1 == error_mark_node
15716 || arg2 == error_mark_node)
15717 return const0_rtx;
15719 /* Check and prepare argument depending on the instruction code.
15721 Note that a switch statement instead of the sequence of tests
15722 would be incorrect as many of the CODE_FOR values could be
15723 CODE_FOR_nothing and that would yield multiple alternatives
15724 with identical values. We'd never reach here at runtime in
15725 this case. */
15726 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15727 || icode == CODE_FOR_altivec_vsldoi_v2df
15728 || icode == CODE_FOR_altivec_vsldoi_v4si
15729 || icode == CODE_FOR_altivec_vsldoi_v8hi
15730 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15732 /* Only allow 4-bit unsigned literals. */
15733 STRIP_NOPS (arg2);
15734 if (TREE_CODE (arg2) != INTEGER_CST
15735 || TREE_INT_CST_LOW (arg2) & ~0xf)
15737 error ("argument 3 must be a 4-bit unsigned literal");
15738 return CONST0_RTX (tmode);
15741 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15742 || icode == CODE_FOR_vsx_xxpermdi_v2di
15743 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15744 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15745 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15746 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15747 || icode == CODE_FOR_vsx_xxpermdi_v4si
15748 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15749 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15750 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15751 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15752 || icode == CODE_FOR_vsx_xxsldwi_v4si
15753 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15754 || icode == CODE_FOR_vsx_xxsldwi_v2di
15755 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15757 /* Only allow 2-bit unsigned literals. */
15758 STRIP_NOPS (arg2);
15759 if (TREE_CODE (arg2) != INTEGER_CST
15760 || TREE_INT_CST_LOW (arg2) & ~0x3)
15762 error ("argument 3 must be a 2-bit unsigned literal");
15763 return CONST0_RTX (tmode);
15766 else if (icode == CODE_FOR_vsx_set_v2df
15767 || icode == CODE_FOR_vsx_set_v2di
15768 || icode == CODE_FOR_bcdadd
15769 || icode == CODE_FOR_bcdadd_lt
15770 || icode == CODE_FOR_bcdadd_eq
15771 || icode == CODE_FOR_bcdadd_gt
15772 || icode == CODE_FOR_bcdsub
15773 || icode == CODE_FOR_bcdsub_lt
15774 || icode == CODE_FOR_bcdsub_eq
15775 || icode == CODE_FOR_bcdsub_gt)
15777 /* Only allow 1-bit unsigned literals. */
15778 STRIP_NOPS (arg2);
15779 if (TREE_CODE (arg2) != INTEGER_CST
15780 || TREE_INT_CST_LOW (arg2) & ~0x1)
15782 error ("argument 3 must be a 1-bit unsigned literal");
15783 return CONST0_RTX (tmode);
15786 else if (icode == CODE_FOR_dfp_ddedpd_dd
15787 || icode == CODE_FOR_dfp_ddedpd_td)
15789 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15790 STRIP_NOPS (arg0);
15791 if (TREE_CODE (arg0) != INTEGER_CST
15792 || TREE_INT_CST_LOW (arg0) & ~0x3)
15794 error ("argument 1 must be 0 or 2");
15795 return CONST0_RTX (tmode);
15798 else if (icode == CODE_FOR_dfp_denbcd_dd
15799 || icode == CODE_FOR_dfp_denbcd_td)
15801 /* Only allow 1-bit unsigned literals. */
15802 STRIP_NOPS (arg0);
15803 if (TREE_CODE (arg0) != INTEGER_CST
15804 || TREE_INT_CST_LOW (arg0) & ~0x1)
15806 error ("argument 1 must be a 1-bit unsigned literal");
15807 return CONST0_RTX (tmode);
15810 else if (icode == CODE_FOR_dfp_dscli_dd
15811 || icode == CODE_FOR_dfp_dscli_td
15812 || icode == CODE_FOR_dfp_dscri_dd
15813 || icode == CODE_FOR_dfp_dscri_td)
15815 /* Only allow 6-bit unsigned literals. */
15816 STRIP_NOPS (arg1);
15817 if (TREE_CODE (arg1) != INTEGER_CST
15818 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15820 error ("argument 2 must be a 6-bit unsigned literal");
15821 return CONST0_RTX (tmode);
15824 else if (icode == CODE_FOR_crypto_vshasigmaw
15825 || icode == CODE_FOR_crypto_vshasigmad)
15827 /* Check that the 2nd and 3rd arguments are integer constants in
15828 range, and prepare the arguments. */
15829 STRIP_NOPS (arg1);
15830 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15832 error ("argument 2 must be 0 or 1");
15833 return CONST0_RTX (tmode);
15836 STRIP_NOPS (arg2);
15837 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15839 error ("argument 3 must be in the range 0..15");
15840 return CONST0_RTX (tmode);
15844 if (target == 0
15845 || GET_MODE (target) != tmode
15846 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15847 target = gen_reg_rtx (tmode);
15849 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15850 op0 = copy_to_mode_reg (mode0, op0);
15851 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15852 op1 = copy_to_mode_reg (mode1, op1);
15853 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15854 op2 = copy_to_mode_reg (mode2, op2);
15856 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15857 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15858 else
15859 pat = GEN_FCN (icode) (target, op0, op1, op2);
15860 if (! pat)
15861 return 0;
15862 emit_insn (pat);
15864 return target;
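/* Example of the literal-operand checks above, using vec_sld (which
   maps to the altivec_vsldoi patterns and so needs a 4-bit literal as
   its third argument).  Assumes <altivec.h>; illustrative only.  */
#if 0
vector int shift_ok (vector int a, vector int b)
{
  return vec_sld (a, b, 3);	/* constant 0..15: accepted */
}
vector int shift_bad (vector int a, vector int b, int n)
{
  return vec_sld (a, b, n);	/* non-literal: rejected with the
				   "4-bit unsigned literal" error */
}
#endif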
15867 /* Expand the lvx builtins. */
15868 static rtx
15869 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15871 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15872 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15873 tree arg0;
15874 machine_mode tmode, mode0;
15875 rtx pat, op0;
15876 enum insn_code icode;
15878 switch (fcode)
15880 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15881 icode = CODE_FOR_vector_altivec_load_v16qi;
15882 break;
15883 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15884 icode = CODE_FOR_vector_altivec_load_v8hi;
15885 break;
15886 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15887 icode = CODE_FOR_vector_altivec_load_v4si;
15888 break;
15889 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15890 icode = CODE_FOR_vector_altivec_load_v4sf;
15891 break;
15892 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15893 icode = CODE_FOR_vector_altivec_load_v2df;
15894 break;
15895 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15896 icode = CODE_FOR_vector_altivec_load_v2di;
15897 break;
15898 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15899 icode = CODE_FOR_vector_altivec_load_v1ti;
15900 break;
15901 default:
15902 *expandedp = false;
15903 return NULL_RTX;
15906 *expandedp = true;
15908 arg0 = CALL_EXPR_ARG (exp, 0);
15909 op0 = expand_normal (arg0);
15910 tmode = insn_data[icode].operand[0].mode;
15911 mode0 = insn_data[icode].operand[1].mode;
15913 if (target == 0
15914 || GET_MODE (target) != tmode
15915 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15916 target = gen_reg_rtx (tmode);
15918 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15919 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15921 pat = GEN_FCN (icode) (target, op0);
15922 if (! pat)
15923 return 0;
15924 emit_insn (pat);
15925 return target;
15928 /* Expand the stvx builtins. */
15929 static rtx
15930 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15931 bool *expandedp)
15933 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15934 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15935 tree arg0, arg1;
15936 machine_mode mode0, mode1;
15937 rtx pat, op0, op1;
15938 enum insn_code icode;
15940 switch (fcode)
15942 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15943 icode = CODE_FOR_vector_altivec_store_v16qi;
15944 break;
15945 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15946 icode = CODE_FOR_vector_altivec_store_v8hi;
15947 break;
15948 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15949 icode = CODE_FOR_vector_altivec_store_v4si;
15950 break;
15951 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15952 icode = CODE_FOR_vector_altivec_store_v4sf;
15953 break;
15954 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15955 icode = CODE_FOR_vector_altivec_store_v2df;
15956 break;
15957 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15958 icode = CODE_FOR_vector_altivec_store_v2di;
15959 break;
15960 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15961 icode = CODE_FOR_vector_altivec_store_v1ti;
15962 break;
15963 default:
15964 *expandedp = false;
15965 return NULL_RTX;
15968 arg0 = CALL_EXPR_ARG (exp, 0);
15969 arg1 = CALL_EXPR_ARG (exp, 1);
15970 op0 = expand_normal (arg0);
15971 op1 = expand_normal (arg1);
15972 mode0 = insn_data[icode].operand[0].mode;
15973 mode1 = insn_data[icode].operand[1].mode;
15975 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15976 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15977 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15978 op1 = copy_to_mode_reg (mode1, op1);
15980 pat = GEN_FCN (icode) (op0, op1);
15981 if (pat)
15982 emit_insn (pat);
15984 *expandedp = true;
15985 return NULL_RTX;
15988 /* Expand the dst builtins. */
15989 static rtx
15990 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15991 bool *expandedp)
15993 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15994 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15995 tree arg0, arg1, arg2;
15996 machine_mode mode0, mode1;
15997 rtx pat, op0, op1, op2;
15998 const struct builtin_description *d;
15999 size_t i;
16001 *expandedp = false;
16003 /* Handle DST variants. */
16004 d = bdesc_dst;
16005 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16006 if (d->code == fcode)
16008 arg0 = CALL_EXPR_ARG (exp, 0);
16009 arg1 = CALL_EXPR_ARG (exp, 1);
16010 arg2 = CALL_EXPR_ARG (exp, 2);
16011 op0 = expand_normal (arg0);
16012 op1 = expand_normal (arg1);
16013 op2 = expand_normal (arg2);
16014 mode0 = insn_data[d->icode].operand[0].mode;
16015 mode1 = insn_data[d->icode].operand[1].mode;
16017 /* If we got invalid arguments, bail out before generating bad rtl. */
16018 if (arg0 == error_mark_node
16019 || arg1 == error_mark_node
16020 || arg2 == error_mark_node)
16021 return const0_rtx;
16023 *expandedp = true;
16024 STRIP_NOPS (arg2);
16025 if (TREE_CODE (arg2) != INTEGER_CST
16026 || TREE_INT_CST_LOW (arg2) & ~0x3)
16028 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
16029 return const0_rtx;
16032 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16033 op0 = copy_to_mode_reg (Pmode, op0);
16034 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16035 op1 = copy_to_mode_reg (mode1, op1);
16037 pat = GEN_FCN (d->icode) (op0, op1, op2);
16038 if (pat != 0)
16039 emit_insn (pat);
16041 return NULL_RTX;
16044 return NULL_RTX;
16047 /* Expand vec_init builtin. */
16048 static rtx
16049 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
16051 machine_mode tmode = TYPE_MODE (type);
16052 machine_mode inner_mode = GET_MODE_INNER (tmode);
16053 int i, n_elt = GET_MODE_NUNITS (tmode);
16055 gcc_assert (VECTOR_MODE_P (tmode));
16056 gcc_assert (n_elt == call_expr_nargs (exp));
16058 if (!target || !register_operand (target, tmode))
16059 target = gen_reg_rtx (tmode);
16061 /* If we have a vector composed of a single element, such as V1TImode, do
16062 the initialization directly. */
16063 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
16065 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
16066 emit_move_insn (target, gen_lowpart (tmode, x));
16068 else
16070 rtvec v = rtvec_alloc (n_elt);
16072 for (i = 0; i < n_elt; ++i)
16074 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
16075 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
16078 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
16081 return target;
16084 /* Return the integer constant in ARG. Constrain it to be in the range
16085 of the subparts of VEC_TYPE; issue an error if not. */
16087 static int
16088 get_element_number (tree vec_type, tree arg)
16090 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16092 if (!tree_fits_uhwi_p (arg)
16093 || (elt = tree_to_uhwi (arg), elt > max))
16095 error ("selector must be an integer constant in the range 0..%wi", max);
16096 return 0;
16099 return elt;
16102 /* Expand vec_set builtin. */
16103 static rtx
16104 altivec_expand_vec_set_builtin (tree exp)
16106 machine_mode tmode, mode1;
16107 tree arg0, arg1, arg2;
16108 int elt;
16109 rtx op0, op1;
16111 arg0 = CALL_EXPR_ARG (exp, 0);
16112 arg1 = CALL_EXPR_ARG (exp, 1);
16113 arg2 = CALL_EXPR_ARG (exp, 2);
16115 tmode = TYPE_MODE (TREE_TYPE (arg0));
16116 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16117 gcc_assert (VECTOR_MODE_P (tmode));
16119 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
16120 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
16121 elt = get_element_number (TREE_TYPE (arg0), arg2);
16123 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
16124 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
16126 op0 = force_reg (tmode, op0);
16127 op1 = force_reg (mode1, op1);
16129 rs6000_expand_vector_set (op0, op1, elt);
16131 return op0;
16134 /* Expand vec_ext builtin. */
16135 static rtx
16136 altivec_expand_vec_ext_builtin (tree exp, rtx target)
16138 machine_mode tmode, mode0;
16139 tree arg0, arg1;
16140 rtx op0;
16141 rtx op1;
16143 arg0 = CALL_EXPR_ARG (exp, 0);
16144 arg1 = CALL_EXPR_ARG (exp, 1);
16146 op0 = expand_normal (arg0);
16147 op1 = expand_normal (arg1);
16149 /* Call get_element_number to validate arg1 if it is a constant. */
16150 if (TREE_CODE (arg1) == INTEGER_CST)
16151 (void) get_element_number (TREE_TYPE (arg0), arg1);
16153 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16154 mode0 = TYPE_MODE (TREE_TYPE (arg0));
16155 gcc_assert (VECTOR_MODE_P (mode0));
16157 op0 = force_reg (mode0, op0);
16159 if (optimize || !target || !register_operand (target, tmode))
16160 target = gen_reg_rtx (tmode);
16162 rs6000_expand_vector_extract (target, op0, op1);
16164 return target;
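/* Usage sketch for the vec_set/vec_ext expanders above, which back the
   vec_insert and vec_extract intrinsics.  Assumes <altivec.h>;
   illustrative only.  */
#if 0
int get_elem2 (vector int v)
{
  return vec_extract (v, 2);		/* element 2 of v */
}
vector int set_elem0 (vector int v, int x)
{
  return vec_insert (x, v, 0);		/* v with element 0 replaced by x */
}
#endif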
16167 /* Expand the builtin in EXP and store the result in TARGET. Store
16168 true in *EXPANDEDP if we found a builtin to expand. */
16169 static rtx
16170 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
16172 const struct builtin_description *d;
16173 size_t i;
16174 enum insn_code icode;
16175 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16176 tree arg0, arg1, arg2;
16177 rtx op0, pat;
16178 machine_mode tmode, mode0;
16179 enum rs6000_builtins fcode
16180 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16182 if (rs6000_overloaded_builtin_p (fcode))
16184 *expandedp = true;
16185 error ("unresolved overload for Altivec builtin %qF", fndecl);
16187 /* Given it is invalid, just generate a normal call. */
16188 return expand_call (exp, target, false);
16191 target = altivec_expand_ld_builtin (exp, target, expandedp);
16192 if (*expandedp)
16193 return target;
16195 target = altivec_expand_st_builtin (exp, target, expandedp);
16196 if (*expandedp)
16197 return target;
16199 target = altivec_expand_dst_builtin (exp, target, expandedp);
16200 if (*expandedp)
16201 return target;
16203 *expandedp = true;
16205 switch (fcode)
16207 case ALTIVEC_BUILTIN_STVX_V2DF:
16208 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
16209 case ALTIVEC_BUILTIN_STVX_V2DI:
16210 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
16211 case ALTIVEC_BUILTIN_STVX_V4SF:
16212 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
16213 case ALTIVEC_BUILTIN_STVX:
16214 case ALTIVEC_BUILTIN_STVX_V4SI:
16215 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
16216 case ALTIVEC_BUILTIN_STVX_V8HI:
16217 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
16218 case ALTIVEC_BUILTIN_STVX_V16QI:
16219 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
16220 case ALTIVEC_BUILTIN_STVEBX:
16221 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
16222 case ALTIVEC_BUILTIN_STVEHX:
16223 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
16224 case ALTIVEC_BUILTIN_STVEWX:
16225 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
16226 case ALTIVEC_BUILTIN_STVXL_V2DF:
16227 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
16228 case ALTIVEC_BUILTIN_STVXL_V2DI:
16229 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
16230 case ALTIVEC_BUILTIN_STVXL_V4SF:
16231 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
16232 case ALTIVEC_BUILTIN_STVXL:
16233 case ALTIVEC_BUILTIN_STVXL_V4SI:
16234 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
16235 case ALTIVEC_BUILTIN_STVXL_V8HI:
16236 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
16237 case ALTIVEC_BUILTIN_STVXL_V16QI:
16238 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
16240 case ALTIVEC_BUILTIN_STVLX:
16241 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
16242 case ALTIVEC_BUILTIN_STVLXL:
16243 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
16244 case ALTIVEC_BUILTIN_STVRX:
16245 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
16246 case ALTIVEC_BUILTIN_STVRXL:
16247 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
16249 case P9V_BUILTIN_STXVL:
16250 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
16252 case VSX_BUILTIN_STXVD2X_V1TI:
16253 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
16254 case VSX_BUILTIN_STXVD2X_V2DF:
16255 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
16256 case VSX_BUILTIN_STXVD2X_V2DI:
16257 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
16258 case VSX_BUILTIN_STXVW4X_V4SF:
16259 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
16260 case VSX_BUILTIN_STXVW4X_V4SI:
16261 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
16262 case VSX_BUILTIN_STXVW4X_V8HI:
16263 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
16264 case VSX_BUILTIN_STXVW4X_V16QI:
16265 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
16267 /* For the following on big endian, it's ok to use any appropriate
16268 unaligned-supporting store, so use a generic expander. For
16269 little-endian, the exact element-reversing instruction must
16270 be used. */
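/* A sketch of what this means in practice (the vec_xst spelling is an
   assumption from the intrinsic headers, not something this file
   defines): a V4SI store written as
     vec_xst (val, 0, ptr);
   arrives here as VSX_BUILTIN_ST_ELEMREV_V4SI.  On little-endian it
   must use the element-reversing vsx_st_elemrev_v4si pattern, while on
   big-endian any unaligned-capable vsx_store_v4si will do.  */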
16271 case VSX_BUILTIN_ST_ELEMREV_V2DF:
16273 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
16274 : CODE_FOR_vsx_st_elemrev_v2df);
16275 return altivec_expand_stv_builtin (code, exp);
16277 case VSX_BUILTIN_ST_ELEMREV_V2DI:
16279 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
16280 : CODE_FOR_vsx_st_elemrev_v2di);
16281 return altivec_expand_stv_builtin (code, exp);
16283 case VSX_BUILTIN_ST_ELEMREV_V4SF:
16285 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
16286 : CODE_FOR_vsx_st_elemrev_v4sf);
16287 return altivec_expand_stv_builtin (code, exp);
16289 case VSX_BUILTIN_ST_ELEMREV_V4SI:
16291 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
16292 : CODE_FOR_vsx_st_elemrev_v4si);
16293 return altivec_expand_stv_builtin (code, exp);
16295 case VSX_BUILTIN_ST_ELEMREV_V8HI:
16297 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
16298 : CODE_FOR_vsx_st_elemrev_v8hi);
16299 return altivec_expand_stv_builtin (code, exp);
16301 case VSX_BUILTIN_ST_ELEMREV_V16QI:
16303 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
16304 : CODE_FOR_vsx_st_elemrev_v16qi);
16305 return altivec_expand_stv_builtin (code, exp);
16308 case ALTIVEC_BUILTIN_MFVSCR:
16309 icode = CODE_FOR_altivec_mfvscr;
16310 tmode = insn_data[icode].operand[0].mode;
16312 if (target == 0
16313 || GET_MODE (target) != tmode
16314 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16315 target = gen_reg_rtx (tmode);
16317 pat = GEN_FCN (icode) (target);
16318 if (! pat)
16319 return 0;
16320 emit_insn (pat);
16321 return target;
16323 case ALTIVEC_BUILTIN_MTVSCR:
16324 icode = CODE_FOR_altivec_mtvscr;
16325 arg0 = CALL_EXPR_ARG (exp, 0);
16326 op0 = expand_normal (arg0);
16327 mode0 = insn_data[icode].operand[0].mode;
16329 /* If we got invalid arguments, bail out before generating bad rtl. */
16330 if (arg0 == error_mark_node)
16331 return const0_rtx;
16333 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16334 op0 = copy_to_mode_reg (mode0, op0);
16336 pat = GEN_FCN (icode) (op0);
16337 if (pat)
16338 emit_insn (pat);
16339 return NULL_RTX;
16341 case ALTIVEC_BUILTIN_DSSALL:
16342 emit_insn (gen_altivec_dssall ());
16343 return NULL_RTX;
16345 case ALTIVEC_BUILTIN_DSS:
16346 icode = CODE_FOR_altivec_dss;
16347 arg0 = CALL_EXPR_ARG (exp, 0);
16348 STRIP_NOPS (arg0);
16349 op0 = expand_normal (arg0);
16350 mode0 = insn_data[icode].operand[0].mode;
16352 /* If we got invalid arguments, bail out before generating bad rtl. */
16353 if (arg0 == error_mark_node)
16354 return const0_rtx;
16356 if (TREE_CODE (arg0) != INTEGER_CST
16357 || TREE_INT_CST_LOW (arg0) & ~0x3)
16359 error ("argument to dss must be a 2-bit unsigned literal");
16360 return const0_rtx;
16363 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16364 op0 = copy_to_mode_reg (mode0, op0);
16366 emit_insn (gen_altivec_dss (op0));
16367 return NULL_RTX;
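/* A quick sketch of the check above (the __builtin_altivec_dss
   spelling is assumed from the usual builtin naming):
     __builtin_altivec_dss (3)   accepted, tag fits in 2 bits
     __builtin_altivec_dss (4)   diagnosed, expands to const0_rtx  */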
16369 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
16370 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
16371 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
16372 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
16373 case VSX_BUILTIN_VEC_INIT_V2DF:
16374 case VSX_BUILTIN_VEC_INIT_V2DI:
16375 case VSX_BUILTIN_VEC_INIT_V1TI:
16376 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
16378 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
16379 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
16380 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
16381 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
16382 case VSX_BUILTIN_VEC_SET_V2DF:
16383 case VSX_BUILTIN_VEC_SET_V2DI:
16384 case VSX_BUILTIN_VEC_SET_V1TI:
16385 return altivec_expand_vec_set_builtin (exp);
16387 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
16388 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
16389 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
16390 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
16391 case VSX_BUILTIN_VEC_EXT_V2DF:
16392 case VSX_BUILTIN_VEC_EXT_V2DI:
16393 case VSX_BUILTIN_VEC_EXT_V1TI:
16394 return altivec_expand_vec_ext_builtin (exp, target);
16396 case P9V_BUILTIN_VEXTRACT4B:
16397 case P9V_BUILTIN_VEC_VEXTRACT4B:
16398 arg1 = CALL_EXPR_ARG (exp, 1);
16399 STRIP_NOPS (arg1);
16401 /* Generate a normal call if arg1 is invalid. */
16402 if (arg1 == error_mark_node)
16403 return expand_call (exp, target, false);
16405 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
16407 error ("second argument to vec_vextract4b must be 0..12");
16408 return expand_call (exp, target, false);
16410 break;
16412 case P9V_BUILTIN_VINSERT4B:
16413 case P9V_BUILTIN_VINSERT4B_DI:
16414 case P9V_BUILTIN_VEC_VINSERT4B:
16415 arg2 = CALL_EXPR_ARG (exp, 2);
16416 STRIP_NOPS (arg2);
16418 /* Generate a normal call if arg2 is invalid. */
16419 if (arg2 == error_mark_node)
16420 return expand_call (exp, target, false);
16422 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
16424 error ("third argument to vec_vinsert4b must be 0..12");
16425 return expand_call (exp, target, false);
16427 break;
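/* Sketch of both checks above (argument order taken from the
   CALL_EXPR_ARG indices used, not from the headers):
     vec_vextract4b (src, 4)        byte offset 0..12, accepted
     vec_vinsert4b (src, dst, 4)    likewise accepted
   A non-constant or out-of-range offset is diagnosed and falls back
   to a normal call.  */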
16429 default:
16430 break;
16431 /* Fall through to the table-driven expanders below. */
16434 /* Expand abs* operations. */
16435 d = bdesc_abs;
16436 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16437 if (d->code == fcode)
16438 return altivec_expand_abs_builtin (d->icode, exp, target);
16440 /* Expand the AltiVec predicates. */
16441 d = bdesc_altivec_preds;
16442 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16443 if (d->code == fcode)
16444 return altivec_expand_predicate_builtin (d->icode, exp, target);
16446 /* The LV* builtins are funky: they were initialized differently from the generic tables, so expand them by hand here. */
16447 switch (fcode)
16449 case ALTIVEC_BUILTIN_LVSL:
16450 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
16451 exp, target, false);
16452 case ALTIVEC_BUILTIN_LVSR:
16453 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
16454 exp, target, false);
16455 case ALTIVEC_BUILTIN_LVEBX:
16456 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
16457 exp, target, false);
16458 case ALTIVEC_BUILTIN_LVEHX:
16459 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
16460 exp, target, false);
16461 case ALTIVEC_BUILTIN_LVEWX:
16462 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
16463 exp, target, false);
16464 case ALTIVEC_BUILTIN_LVXL_V2DF:
16465 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
16466 exp, target, false);
16467 case ALTIVEC_BUILTIN_LVXL_V2DI:
16468 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
16469 exp, target, false);
16470 case ALTIVEC_BUILTIN_LVXL_V4SF:
16471 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
16472 exp, target, false);
16473 case ALTIVEC_BUILTIN_LVXL:
16474 case ALTIVEC_BUILTIN_LVXL_V4SI:
16475 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
16476 exp, target, false);
16477 case ALTIVEC_BUILTIN_LVXL_V8HI:
16478 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
16479 exp, target, false);
16480 case ALTIVEC_BUILTIN_LVXL_V16QI:
16481 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
16482 exp, target, false);
16483 case ALTIVEC_BUILTIN_LVX_V2DF:
16484 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
16485 exp, target, false);
16486 case ALTIVEC_BUILTIN_LVX_V2DI:
16487 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
16488 exp, target, false);
16489 case ALTIVEC_BUILTIN_LVX_V4SF:
16490 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
16491 exp, target, false);
16492 case ALTIVEC_BUILTIN_LVX:
16493 case ALTIVEC_BUILTIN_LVX_V4SI:
16494 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
16495 exp, target, false);
16496 case ALTIVEC_BUILTIN_LVX_V8HI:
16497 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
16498 exp, target, false);
16499 case ALTIVEC_BUILTIN_LVX_V16QI:
16500 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
16501 exp, target, false);
16502 case ALTIVEC_BUILTIN_LVLX:
16503 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
16504 exp, target, true);
16505 case ALTIVEC_BUILTIN_LVLXL:
16506 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
16507 exp, target, true);
16508 case ALTIVEC_BUILTIN_LVRX:
16509 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
16510 exp, target, true);
16511 case ALTIVEC_BUILTIN_LVRXL:
16512 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
16513 exp, target, true);
16514 case VSX_BUILTIN_LXVD2X_V1TI:
16515 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
16516 exp, target, false);
16517 case VSX_BUILTIN_LXVD2X_V2DF:
16518 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
16519 exp, target, false);
16520 case VSX_BUILTIN_LXVD2X_V2DI:
16521 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
16522 exp, target, false);
16523 case VSX_BUILTIN_LXVW4X_V4SF:
16524 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
16525 exp, target, false);
16526 case VSX_BUILTIN_LXVW4X_V4SI:
16527 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
16528 exp, target, false);
16529 case VSX_BUILTIN_LXVW4X_V8HI:
16530 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
16531 exp, target, false);
16532 case VSX_BUILTIN_LXVW4X_V16QI:
16533 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
16534 exp, target, false);
16535 /* For the following on big endian, it's ok to use any appropriate
16536 unaligned-supporting load, so use a generic expander. For
16537 little-endian, the exact element-reversing instruction must
16538 be used. */
16539 case VSX_BUILTIN_LD_ELEMREV_V2DF:
16541 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
16542 : CODE_FOR_vsx_ld_elemrev_v2df);
16543 return altivec_expand_lv_builtin (code, exp, target, false);
16545 case VSX_BUILTIN_LD_ELEMREV_V2DI:
16547 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
16548 : CODE_FOR_vsx_ld_elemrev_v2di);
16549 return altivec_expand_lv_builtin (code, exp, target, false);
16551 case VSX_BUILTIN_LD_ELEMREV_V4SF:
16553 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
16554 : CODE_FOR_vsx_ld_elemrev_v4sf);
16555 return altivec_expand_lv_builtin (code, exp, target, false);
16557 case VSX_BUILTIN_LD_ELEMREV_V4SI:
16559 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
16560 : CODE_FOR_vsx_ld_elemrev_v4si);
16561 return altivec_expand_lv_builtin (code, exp, target, false);
16563 case VSX_BUILTIN_LD_ELEMREV_V8HI:
16565 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
16566 : CODE_FOR_vsx_ld_elemrev_v8hi);
16567 return altivec_expand_lv_builtin (code, exp, target, false);
16569 case VSX_BUILTIN_LD_ELEMREV_V16QI:
16571 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
16572 : CODE_FOR_vsx_ld_elemrev_v16qi);
16573 return altivec_expand_lv_builtin (code, exp, target, false);
16575 break;
16576 default:
16577 break;
16578 /* Fall through: nothing matched, so report the builtin as not expanded. */
16581 *expandedp = false;
16582 return NULL_RTX;
16585 /* Expand the builtin in EXP and store the result in TARGET. Store
16586 true in *EXPANDEDP if we found a builtin to expand. */
16587 static rtx
16588 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
16590 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16591 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16592 const struct builtin_description *d;
16593 size_t i;
16595 *expandedp = true;
16597 switch (fcode)
16599 case PAIRED_BUILTIN_STX:
16600 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
16601 case PAIRED_BUILTIN_LX:
16602 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
16603 default:
16604 break;
16605 /* Fall through to the paired predicate table below. */
16608 /* Expand the paired predicates. */
16609 d = bdesc_paired_preds;
16610 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
16611 if (d->code == fcode)
16612 return paired_expand_predicate_builtin (d->icode, exp, target);
16614 *expandedp = false;
16615 return NULL_RTX;
16618 /* Binops that need to be initialized manually, but can be expanded
16619 automagically by rs6000_expand_binop_builtin. */
16620 static const struct builtin_description bdesc_2arg_spe[] =
16622 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
16623 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
16624 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
16625 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
16626 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
16627 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
16628 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
16629 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
16630 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
16631 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
16632 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
16633 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
16634 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
16635 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
16636 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
16637 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
16638 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
16639 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
16640 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
16641 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
16642 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
16643 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
16646 /* Expand the builtin in EXP and store the result in TARGET. Store
16647 true in *EXPANDEDP if we found a builtin to expand.
16649 This expands the SPE builtins that are not simple unary and binary
16650 operations. */
16651 static rtx
16652 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
16654 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16655 tree arg1, arg0;
16656 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16657 enum insn_code icode;
16658 machine_mode tmode, mode0;
16659 rtx pat, op0;
16660 const struct builtin_description *d;
16661 size_t i;
16663 *expandedp = true;
16665 /* Syntax check for a 5-bit unsigned immediate. */
16666 switch (fcode)
16668 case SPE_BUILTIN_EVSTDD:
16669 case SPE_BUILTIN_EVSTDH:
16670 case SPE_BUILTIN_EVSTDW:
16671 case SPE_BUILTIN_EVSTWHE:
16672 case SPE_BUILTIN_EVSTWHO:
16673 case SPE_BUILTIN_EVSTWWE:
16674 case SPE_BUILTIN_EVSTWWO:
16675 arg1 = CALL_EXPR_ARG (exp, 2);
16676 if (TREE_CODE (arg1) != INTEGER_CST
16677 || TREE_INT_CST_LOW (arg1) & ~0x1f)
16679 error ("argument 2 must be a 5-bit unsigned literal");
16680 return const0_rtx;
16682 break;
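/* Sketch: taking __builtin_spe_evstdd as representative, the literal
   checked above is CALL_EXPR_ARG (exp, 2), so the third argument must
   lie in 0..31; anything needing more than 5 bits is rejected with
   the error above.  */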
16683 default:
16684 break;
16687 /* The evsplat*i instructions are not quite generic. */
16688 switch (fcode)
16690 case SPE_BUILTIN_EVSPLATFI:
16691 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
16692 exp, target);
16693 case SPE_BUILTIN_EVSPLATI:
16694 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
16695 exp, target);
16696 default:
16697 break;
16700 d = bdesc_2arg_spe;
16701 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
16702 if (d->code == fcode)
16703 return rs6000_expand_binop_builtin (d->icode, exp, target);
16705 d = bdesc_spe_predicates;
16706 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
16707 if (d->code == fcode)
16708 return spe_expand_predicate_builtin (d->icode, exp, target);
16710 d = bdesc_spe_evsel;
16711 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
16712 if (d->code == fcode)
16713 return spe_expand_evsel_builtin (d->icode, exp, target);
16715 switch (fcode)
16717 case SPE_BUILTIN_EVSTDDX:
16718 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
16719 case SPE_BUILTIN_EVSTDHX:
16720 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
16721 case SPE_BUILTIN_EVSTDWX:
16722 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
16723 case SPE_BUILTIN_EVSTWHEX:
16724 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
16725 case SPE_BUILTIN_EVSTWHOX:
16726 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
16727 case SPE_BUILTIN_EVSTWWEX:
16728 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
16729 case SPE_BUILTIN_EVSTWWOX:
16730 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
16731 case SPE_BUILTIN_EVSTDD:
16732 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
16733 case SPE_BUILTIN_EVSTDH:
16734 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
16735 case SPE_BUILTIN_EVSTDW:
16736 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
16737 case SPE_BUILTIN_EVSTWHE:
16738 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
16739 case SPE_BUILTIN_EVSTWHO:
16740 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
16741 case SPE_BUILTIN_EVSTWWE:
16742 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
16743 case SPE_BUILTIN_EVSTWWO:
16744 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
16745 case SPE_BUILTIN_MFSPEFSCR:
16746 icode = CODE_FOR_spe_mfspefscr;
16747 tmode = insn_data[icode].operand[0].mode;
16749 if (target == 0
16750 || GET_MODE (target) != tmode
16751 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16752 target = gen_reg_rtx (tmode);
16754 pat = GEN_FCN (icode) (target);
16755 if (! pat)
16756 return 0;
16757 emit_insn (pat);
16758 return target;
16759 case SPE_BUILTIN_MTSPEFSCR:
16760 icode = CODE_FOR_spe_mtspefscr;
16761 arg0 = CALL_EXPR_ARG (exp, 0);
16762 op0 = expand_normal (arg0);
16763 mode0 = insn_data[icode].operand[0].mode;
16765 if (arg0 == error_mark_node)
16766 return const0_rtx;
16768 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16769 op0 = copy_to_mode_reg (mode0, op0);
16771 pat = GEN_FCN (icode) (op0);
16772 if (pat)
16773 emit_insn (pat);
16774 return NULL_RTX;
16775 default:
16776 break;
16779 *expandedp = false;
16780 return NULL_RTX;
16783 static rtx
16784 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16786 rtx pat, scratch, tmp;
16787 tree form = CALL_EXPR_ARG (exp, 0);
16788 tree arg0 = CALL_EXPR_ARG (exp, 1);
16789 tree arg1 = CALL_EXPR_ARG (exp, 2);
16790 rtx op0 = expand_normal (arg0);
16791 rtx op1 = expand_normal (arg1);
16792 machine_mode mode0 = insn_data[icode].operand[1].mode;
16793 machine_mode mode1 = insn_data[icode].operand[2].mode;
16794 int form_int;
16795 enum rtx_code code;
16797 if (TREE_CODE (form) != INTEGER_CST)
16799 error ("argument 1 of __builtin_paired_predicate must be a constant");
16800 return const0_rtx;
16802 else
16803 form_int = TREE_INT_CST_LOW (form);
16805 gcc_assert (mode0 == mode1);
16807 if (arg0 == error_mark_node || arg1 == error_mark_node)
16808 return const0_rtx;
16810 if (target == 0
16811 || GET_MODE (target) != SImode
16812 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16813 target = gen_reg_rtx (SImode);
16814 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16815 op0 = copy_to_mode_reg (mode0, op0);
16816 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16817 op1 = copy_to_mode_reg (mode1, op1);
16819 scratch = gen_reg_rtx (CCFPmode);
16821 pat = GEN_FCN (icode) (scratch, op0, op1);
16822 if (!pat)
16823 return const0_rtx;
16825 emit_insn (pat);
16827 switch (form_int)
16829 /* LT bit. */
16830 case 0:
16831 code = LT;
16832 break;
16833 /* GT bit. */
16834 case 1:
16835 code = GT;
16836 break;
16837 /* EQ bit. */
16838 case 2:
16839 code = EQ;
16840 break;
16841 /* UN bit. */
16842 case 3:
16843 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16844 return target;
16845 default:
16846 error ("argument 1 of __builtin_paired_predicate is out of range");
16847 return const0_rtx;
16850 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16851 emit_move_insn (target, tmp);
16852 return target;
16855 static rtx
16856 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16858 rtx pat, scratch, tmp;
16859 tree form = CALL_EXPR_ARG (exp, 0);
16860 tree arg0 = CALL_EXPR_ARG (exp, 1);
16861 tree arg1 = CALL_EXPR_ARG (exp, 2);
16862 rtx op0 = expand_normal (arg0);
16863 rtx op1 = expand_normal (arg1);
16864 machine_mode mode0 = insn_data[icode].operand[1].mode;
16865 machine_mode mode1 = insn_data[icode].operand[2].mode;
16866 int form_int;
16867 enum rtx_code code;
16869 if (TREE_CODE (form) != INTEGER_CST)
16871 error ("argument 1 of __builtin_spe_predicate must be a constant");
16872 return const0_rtx;
16874 else
16875 form_int = TREE_INT_CST_LOW (form);
16877 gcc_assert (mode0 == mode1);
16879 if (arg0 == error_mark_node || arg1 == error_mark_node)
16880 return const0_rtx;
16882 if (target == 0
16883 || GET_MODE (target) != SImode
16884 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
16885 target = gen_reg_rtx (SImode);
16887 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16888 op0 = copy_to_mode_reg (mode0, op0);
16889 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
16890 op1 = copy_to_mode_reg (mode1, op1);
16892 scratch = gen_reg_rtx (CCmode);
16894 pat = GEN_FCN (icode) (scratch, op0, op1);
16895 if (! pat)
16896 return const0_rtx;
16897 emit_insn (pat);
16899 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
16900 _lower_. We use one compare, but look in different bits of the
16901 CR for each variant.
16903 There are 2 elements in each SPE simd type (upper/lower). The CR
16904 bits are set as follows:
16906 BIT 0 | BIT 1 | BIT 2 | BIT 3
16907 U | L | (U | L) | (U & L)
16909 So, for an "all" relationship, BIT 3 would be set.
16910 For an "any" relationship, BIT 2 would be set. Etc.
16912 Following traditional nomenclature, these bits map to:
16914 BIT 0 | BIT 1 | BIT 2 | BIT 3
16915 LT | GT | EQ | OV
16917 Later, we generate rtl to test the OV, EQ, LT or GT bit, as the variant requires. */
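/* Worked example of the table: for the _all_ variant (form 0) both
   halves must satisfy the relation, so we test BIT 3, i.e. the OV
   bit; _any_ (form 1) tests BIT 2 / EQ; _upper_ (form 2) tests
   BIT 0 / LT; and _lower_ (form 3) tests BIT 1 / GT -- exactly the
   mapping the switch below implements.  */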
16920 switch (form_int)
16922 /* All variant. OV bit. */
16923 case 0:
16924 /* We need to get to the OV bit, which is the ORDERED bit. We
16925 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
16926 that's ugly and will make validate_condition_mode die.
16927 So let's just use another pattern. */
16928 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16929 return target;
16930 /* Any variant. EQ bit. */
16931 case 1:
16932 code = EQ;
16933 break;
16934 /* Upper variant. LT bit. */
16935 case 2:
16936 code = LT;
16937 break;
16938 /* Lower variant. GT bit. */
16939 case 3:
16940 code = GT;
16941 break;
16942 default:
16943 error ("argument 1 of __builtin_spe_predicate is out of range");
16944 return const0_rtx;
16947 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16948 emit_move_insn (target, tmp);
16950 return target;
16953 /* The evsel builtins look like this:
16955 e = __builtin_spe_evsel_OP (a, b, c, d);
16957 and work like this:
16959 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
16960 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
16963 static rtx
16964 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
16966 rtx pat, scratch;
16967 tree arg0 = CALL_EXPR_ARG (exp, 0);
16968 tree arg1 = CALL_EXPR_ARG (exp, 1);
16969 tree arg2 = CALL_EXPR_ARG (exp, 2);
16970 tree arg3 = CALL_EXPR_ARG (exp, 3);
16971 rtx op0 = expand_normal (arg0);
16972 rtx op1 = expand_normal (arg1);
16973 rtx op2 = expand_normal (arg2);
16974 rtx op3 = expand_normal (arg3);
16975 machine_mode mode0 = insn_data[icode].operand[1].mode;
16976 machine_mode mode1 = insn_data[icode].operand[2].mode;
16978 gcc_assert (mode0 == mode1);
16980 if (arg0 == error_mark_node || arg1 == error_mark_node
16981 || arg2 == error_mark_node || arg3 == error_mark_node)
16982 return const0_rtx;
16984 if (target == 0
16985 || GET_MODE (target) != mode0
16986 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
16987 target = gen_reg_rtx (mode0);
16989 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16990 op0 = copy_to_mode_reg (mode0, op0);
16991 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
16992 op1 = copy_to_mode_reg (mode0, op1);
16993 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
16994 op2 = copy_to_mode_reg (mode0, op2);
16995 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
16996 op3 = copy_to_mode_reg (mode0, op3);
16998 /* Generate the compare. */
16999 scratch = gen_reg_rtx (CCmode);
17000 pat = GEN_FCN (icode) (scratch, op0, op1);
17001 if (! pat)
17002 return const0_rtx;
17003 emit_insn (pat);
17005 if (mode0 == V2SImode)
17006 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
17007 else
17008 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
17010 return target;
17013 /* Raise an error message for a builtin function that is called without the
17014 appropriate target options being set. */
17016 static void
17017 rs6000_invalid_builtin (enum rs6000_builtins fncode)
17019 size_t uns_fncode = (size_t)fncode;
17020 const char *name = rs6000_builtin_info[uns_fncode].name;
17021 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
17023 gcc_assert (name != NULL);
17024 if ((fnmask & RS6000_BTM_CELL) != 0)
17025 error ("Builtin function %s is only valid for the cell processor", name);
17026 else if ((fnmask & RS6000_BTM_VSX) != 0)
17027 error ("Builtin function %s requires the -mvsx option", name);
17028 else if ((fnmask & RS6000_BTM_HTM) != 0)
17029 error ("Builtin function %s requires the -mhtm option", name);
17030 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
17031 error ("Builtin function %s requires the -maltivec option", name);
17032 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
17033 error ("Builtin function %s requires the -mpaired option", name);
17034 else if ((fnmask & RS6000_BTM_SPE) != 0)
17035 error ("Builtin function %s requires the -mspe option", name);
17036 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
17037 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
17038 error ("Builtin function %s requires the -mhard-dfp and"
17039 " -mpower8-vector options", name);
17040 else if ((fnmask & RS6000_BTM_DFP) != 0)
17041 error ("Builtin function %s requires the -mhard-dfp option", name);
17042 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
17043 error ("Builtin function %s requires the -mpower8-vector option", name);
17044 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
17045 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
17046 error ("Builtin function %s requires the -mcpu=power9 and"
17047 " -m64 options", name);
17048 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
17049 error ("Builtin function %s requires the -mcpu=power9 option", name);
17050 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
17051 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
17052 error ("Builtin function %s requires the -mcpu=power9 and"
17053 " -m64 options", name);
17054 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
17055 error ("Builtin function %s requires the -mcpu=power9 option", name);
17056 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
17057 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
17058 error ("Builtin function %s requires the -mhard-float and"
17059 " -mlong-double-128 options", name);
17060 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
17061 error ("Builtin function %s requires the -mhard-float option", name);
17062 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
17063 error ("Builtin function %s requires the -mfloat128 option", name);
17064 else
17065 error ("Builtin function %s is not supported with the current options",
17066 name);
17069 /* Target hook for early folding of built-ins, shamelessly stolen
17070 from ia64.c. */
17072 static tree
17073 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
17074 tree *args, bool ignore ATTRIBUTE_UNUSED)
17076 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
17078 enum rs6000_builtins fn_code
17079 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
17080 switch (fn_code)
17082 case RS6000_BUILTIN_NANQ:
17083 case RS6000_BUILTIN_NANSQ:
17085 tree type = TREE_TYPE (TREE_TYPE (fndecl));
17086 const char *str = c_getstr (*args);
17087 int quiet = fn_code == RS6000_BUILTIN_NANQ;
17088 REAL_VALUE_TYPE real;
17090 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
17091 return build_real (type, real);
17092 return NULL_TREE;
17094 case RS6000_BUILTIN_INFQ:
17095 case RS6000_BUILTIN_HUGE_VALQ:
17097 tree type = TREE_TYPE (TREE_TYPE (fndecl));
17098 REAL_VALUE_TYPE inf;
17099 real_inf (&inf);
17100 return build_real (type, inf);
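/* Usage sketch (assuming the __float128 keyword is enabled):
     __float128 q = __builtin_nanq ("");
     __float128 i = __builtin_infq ();
   Both fold here to REAL_CST constants, so no call survives into the
   generated code.  */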
17102 default:
17103 break;
17106 #ifdef SUBTARGET_FOLD_BUILTIN
17107 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
17108 #else
17109 return NULL_TREE;
17110 #endif
17113 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
17114 a constant, use rs6000_fold_builtin.) */
17116 bool
17117 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
17119 gimple *stmt = gsi_stmt (*gsi);
17120 tree fndecl = gimple_call_fndecl (stmt);
17121 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
17122 enum rs6000_builtins fn_code
17123 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
17124 tree arg0, arg1, lhs;
17126 switch (fn_code)
17128 /* Flavors of vec_add. We deliberately don't expand
17129 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
17130 TImode, resulting in much poorer code generation. */
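/* A sketch of the replacement: x = __builtin_altivec_vadduwm (a, b)
   is rewritten in GIMPLE as x = a + b, so later passes see an
   ordinary vector PLUS_EXPR.  */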
17131 case ALTIVEC_BUILTIN_VADDUBM:
17132 case ALTIVEC_BUILTIN_VADDUHM:
17133 case ALTIVEC_BUILTIN_VADDUWM:
17134 case P8V_BUILTIN_VADDUDM:
17135 case ALTIVEC_BUILTIN_VADDFP:
17136 case VSX_BUILTIN_XVADDDP:
17138 arg0 = gimple_call_arg (stmt, 0);
17139 arg1 = gimple_call_arg (stmt, 1);
17140 lhs = gimple_call_lhs (stmt);
17141 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
17142 gimple_set_location (g, gimple_location (stmt));
17143 gsi_replace (gsi, g, true);
17144 return true;
17146 /* Flavors of vec_sub. We deliberately don't expand
17147 P8V_BUILTIN_VSUBUQM. */
17148 case ALTIVEC_BUILTIN_VSUBUBM:
17149 case ALTIVEC_BUILTIN_VSUBUHM:
17150 case ALTIVEC_BUILTIN_VSUBUWM:
17151 case P8V_BUILTIN_VSUBUDM:
17152 case ALTIVEC_BUILTIN_VSUBFP:
17153 case VSX_BUILTIN_XVSUBDP:
17155 arg0 = gimple_call_arg (stmt, 0);
17156 arg1 = gimple_call_arg (stmt, 1);
17157 lhs = gimple_call_lhs (stmt);
17158 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
17159 gimple_set_location (g, gimple_location (stmt));
17160 gsi_replace (gsi, g, true);
17161 return true;
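/* Flavors of vec_mul (float). */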
17163 case VSX_BUILTIN_XVMULSP:
17164 case VSX_BUILTIN_XVMULDP:
17166 arg0 = gimple_call_arg (stmt, 0);
17167 arg1 = gimple_call_arg (stmt, 1);
17168 lhs = gimple_call_lhs (stmt);
17169 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
17170 gimple_set_location (g, gimple_location (stmt));
17171 gsi_replace (gsi, g, true);
17172 return true;
17174 /* Even element flavors of vec_mul (signed). */
17175 case ALTIVEC_BUILTIN_VMULESB:
17176 case ALTIVEC_BUILTIN_VMULESH:
17177 /* Even element flavors of vec_mul (unsigned). */
17178 case ALTIVEC_BUILTIN_VMULEUB:
17179 case ALTIVEC_BUILTIN_VMULEUH:
17181 arg0 = gimple_call_arg (stmt, 0);
17182 arg1 = gimple_call_arg (stmt, 1);
17183 lhs = gimple_call_lhs (stmt);
17184 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
17185 gimple_set_location (g, gimple_location (stmt));
17186 gsi_replace (gsi, g, true);
17187 return true;
17189 /* Odd element flavors of vec_mul (signed). */
17190 case ALTIVEC_BUILTIN_VMULOSB:
17191 case ALTIVEC_BUILTIN_VMULOSH:
17192 /* Odd element flavors of vec_mul (unsigned). */
17193 case ALTIVEC_BUILTIN_VMULOUB:
17194 case ALTIVEC_BUILTIN_VMULOUH:
17196 arg0 = gimple_call_arg (stmt, 0);
17197 arg1 = gimple_call_arg (stmt, 1);
17198 lhs = gimple_call_lhs (stmt);
17199 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
17200 gimple_set_location (g, gimple_location (stmt));
17201 gsi_replace (gsi, g, true);
17202 return true;
17204 /* Flavors of vec_div (integer). */
17205 case VSX_BUILTIN_DIV_V2DI:
17206 case VSX_BUILTIN_UDIV_V2DI:
17208 arg0 = gimple_call_arg (stmt, 0);
17209 arg1 = gimple_call_arg (stmt, 1);
17210 lhs = gimple_call_lhs (stmt);
17211 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
17212 gimple_set_location (g, gimple_location (stmt));
17213 gsi_replace (gsi, g, true);
17214 return true;
17216 /* Flavors of vec_div (float). */
17217 case VSX_BUILTIN_XVDIVSP:
17218 case VSX_BUILTIN_XVDIVDP:
17220 arg0 = gimple_call_arg (stmt, 0);
17221 arg1 = gimple_call_arg (stmt, 1);
17222 lhs = gimple_call_lhs (stmt);
17223 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
17224 gimple_set_location (g, gimple_location (stmt));
17225 gsi_replace (gsi, g, true);
17226 return true;
17228 /* Flavors of vec_and. */
17229 case ALTIVEC_BUILTIN_VAND:
17231 arg0 = gimple_call_arg (stmt, 0);
17232 arg1 = gimple_call_arg (stmt, 1);
17233 lhs = gimple_call_lhs (stmt);
17234 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
17235 gimple_set_location (g, gimple_location (stmt));
17236 gsi_replace (gsi, g, true);
17237 return true;
17239 /* Flavors of vec_andc. */
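/* E.g. vec_andc (a, b) becomes the two-statement sequence
     temp = ~b;
     lhs = a & temp;
   The nand, orc and nor cases below use the same insert-then-replace
   shape.  */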
17240 case ALTIVEC_BUILTIN_VANDC:
17242 arg0 = gimple_call_arg (stmt, 0);
17243 arg1 = gimple_call_arg (stmt, 1);
17244 lhs = gimple_call_lhs (stmt);
17245 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17246 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
17247 gimple_set_location (g, gimple_location (stmt));
17248 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17249 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
17250 gimple_set_location (g, gimple_location (stmt));
17251 gsi_replace (gsi, g, true);
17252 return true;
17254 /* Flavors of vec_nand. */
17255 case P8V_BUILTIN_VEC_NAND:
17256 case P8V_BUILTIN_NAND_V16QI:
17257 case P8V_BUILTIN_NAND_V8HI:
17258 case P8V_BUILTIN_NAND_V4SI:
17259 case P8V_BUILTIN_NAND_V4SF:
17260 case P8V_BUILTIN_NAND_V2DF:
17261 case P8V_BUILTIN_NAND_V2DI:
17263 arg0 = gimple_call_arg (stmt, 0);
17264 arg1 = gimple_call_arg (stmt, 1);
17265 lhs = gimple_call_lhs (stmt);
17266 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17267 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
17268 gimple_set_location (g, gimple_location (stmt));
17269 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17270 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
17271 gimple_set_location (g, gimple_location (stmt));
17272 gsi_replace (gsi, g, true);
17273 return true;
17275 /* Flavors of vec_or. */
17276 case ALTIVEC_BUILTIN_VOR:
17278 arg0 = gimple_call_arg (stmt, 0);
17279 arg1 = gimple_call_arg (stmt, 1);
17280 lhs = gimple_call_lhs (stmt);
17281 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
17282 gimple_set_location (g, gimple_location (stmt));
17283 gsi_replace (gsi, g, true);
17284 return true;
17286 /* Flavors of vec_orc. */
17287 case P8V_BUILTIN_ORC_V16QI:
17288 case P8V_BUILTIN_ORC_V8HI:
17289 case P8V_BUILTIN_ORC_V4SI:
17290 case P8V_BUILTIN_ORC_V4SF:
17291 case P8V_BUILTIN_ORC_V2DF:
17292 case P8V_BUILTIN_ORC_V2DI:
17294 arg0 = gimple_call_arg (stmt, 0);
17295 arg1 = gimple_call_arg (stmt, 1);
17296 lhs = gimple_call_lhs (stmt);
17297 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17298 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
17299 gimple_set_location (g, gimple_location (stmt));
17300 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17301 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
17302 gimple_set_location (g, gimple_location (stmt));
17303 gsi_replace (gsi, g, true);
17304 return true;
17306 /* Flavors of vec_xor. */
17307 case ALTIVEC_BUILTIN_VXOR:
17309 arg0 = gimple_call_arg (stmt, 0);
17310 arg1 = gimple_call_arg (stmt, 1);
17311 lhs = gimple_call_lhs (stmt);
17312 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
17313 gimple_set_location (g, gimple_location (stmt));
17314 gsi_replace (gsi, g, true);
17315 return true;
17317 /* Flavors of vec_nor. */
17318 case ALTIVEC_BUILTIN_VNOR:
17320 arg0 = gimple_call_arg (stmt, 0);
17321 arg1 = gimple_call_arg (stmt, 1);
17322 lhs = gimple_call_lhs (stmt);
17323 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17324 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
17325 gimple_set_location (g, gimple_location (stmt));
17326 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17327 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
17328 gimple_set_location (g, gimple_location (stmt));
17329 gsi_replace (gsi, g, true);
17330 return true;
17332 /* Flavors of vec_abs. */
17333 case ALTIVEC_BUILTIN_ABS_V16QI:
17334 case ALTIVEC_BUILTIN_ABS_V8HI:
17335 case ALTIVEC_BUILTIN_ABS_V4SI:
17336 case ALTIVEC_BUILTIN_ABS_V4SF:
17337 case P8V_BUILTIN_ABS_V2DI:
17338 case VSX_BUILTIN_XVABSDP:
17340 arg0 = gimple_call_arg (stmt, 0);
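/* Only fold integer abs when overflow wraps; otherwise ABS_EXPR of
   the most negative element would be undefined behavior, which the
   vector instruction itself does not have (the presumed reason for
   the guard below).  */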
17341 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
17342 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
17343 return false;
17344 lhs = gimple_call_lhs (stmt);
17345 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
17346 gimple_set_location (g, gimple_location (stmt));
17347 gsi_replace (gsi, g, true);
17348 return true;
17350 default:
17351 break;
17354 return false;
17357 /* Expand an expression EXP that calls a built-in function,
17358 with result going to TARGET if that's convenient
17359 (and in mode MODE if that's convenient).
17360 SUBTARGET may be used as the target for computing one of EXP's operands.
17361 IGNORE is nonzero if the value is to be ignored. */
17363 static rtx
17364 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17365 machine_mode mode ATTRIBUTE_UNUSED,
17366 int ignore ATTRIBUTE_UNUSED)
17368 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
17369 enum rs6000_builtins fcode
17370 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
17371 size_t uns_fcode = (size_t)fcode;
17372 const struct builtin_description *d;
17373 size_t i;
17374 rtx ret;
17375 bool success;
17376 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
17377 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
17379 if (TARGET_DEBUG_BUILTIN)
17381 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
17382 const char *name1 = rs6000_builtin_info[uns_fcode].name;
17383 const char *name2 = ((icode != CODE_FOR_nothing)
17384 ? get_insn_name ((int)icode)
17385 : "nothing");
17386 const char *name3;
17388 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
17390 default: name3 = "unknown"; break;
17391 case RS6000_BTC_SPECIAL: name3 = "special"; break;
17392 case RS6000_BTC_UNARY: name3 = "unary"; break;
17393 case RS6000_BTC_BINARY: name3 = "binary"; break;
17394 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
17395 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
17396 case RS6000_BTC_ABS: name3 = "abs"; break;
17397 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
17398 case RS6000_BTC_DST: name3 = "dst"; break;
17402 fprintf (stderr,
17403 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
17404 (name1) ? name1 : "---", fcode,
17405 (name2) ? name2 : "---", (int)icode,
17406 name3,
17407 func_valid_p ? "" : ", not valid");
17410 if (!func_valid_p)
17412 rs6000_invalid_builtin (fcode);
17414 /* Given it is invalid, just generate a normal call. */
17415 return expand_call (exp, target, ignore);
17418 switch (fcode)
17420 case RS6000_BUILTIN_RECIP:
17421 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
17423 case RS6000_BUILTIN_RECIPF:
17424 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
17426 case RS6000_BUILTIN_RSQRTF:
17427 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
17429 case RS6000_BUILTIN_RSQRT:
17430 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
17432 case POWER7_BUILTIN_BPERMD:
17433 return rs6000_expand_binop_builtin (((TARGET_64BIT)
17434 ? CODE_FOR_bpermd_di
17435 : CODE_FOR_bpermd_si), exp, target);
17437 case RS6000_BUILTIN_GET_TB:
17438 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
17439 target);
17441 case RS6000_BUILTIN_MFTB:
17442 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
17443 ? CODE_FOR_rs6000_mftb_di
17444 : CODE_FOR_rs6000_mftb_si),
17445 target);
17447 case RS6000_BUILTIN_MFFS:
17448 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
17450 case RS6000_BUILTIN_MTFSF:
17451 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
17453 case RS6000_BUILTIN_CPU_INIT:
17454 case RS6000_BUILTIN_CPU_IS:
17455 case RS6000_BUILTIN_CPU_SUPPORTS:
17456 return cpu_expand_builtin (fcode, exp, target);
17458 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
17459 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
17461 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
17462 : (int) CODE_FOR_altivec_lvsl_direct);
17463 machine_mode tmode = insn_data[icode].operand[0].mode;
17464 machine_mode mode = insn_data[icode].operand[1].mode;
17465 tree arg;
17466 rtx op, addr, pat;
17468 gcc_assert (TARGET_ALTIVEC);
17470 arg = CALL_EXPR_ARG (exp, 0);
17471 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
17472 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
17473 addr = memory_address (mode, op);
17474 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
17475 op = addr;
17476 else
17478 /* For the load case need to negate the address. */
17479 op = gen_reg_rtx (GET_MODE (addr));
17480 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
17482 op = gen_rtx_MEM (mode, op);
17484 if (target == 0
17485 || GET_MODE (target) != tmode
17486 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17487 target = gen_reg_rtx (tmode);
17489 pat = GEN_FCN (icode) (target, op);
17490 if (!pat)
17491 return 0;
17492 emit_insn (pat);
17494 return target;
17497 case ALTIVEC_BUILTIN_VCFUX:
17498 case ALTIVEC_BUILTIN_VCFSX:
17499 case ALTIVEC_BUILTIN_VCTUXS:
17500 case ALTIVEC_BUILTIN_VCTSXS:
17501 /* FIXME: There's got to be a nicer way to handle this case than
17502 constructing a new CALL_EXPR. */
17503 if (call_expr_nargs (exp) == 1)
17505 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
17506 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
17508 break;
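/* In effect, a one-argument call such as __builtin_altivec_vcfux (v)
   is rewritten as __builtin_altivec_vcfux (v, 0), supplying the
   implied zero scale operand.  */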
17510 default:
17511 break;
17514 if (TARGET_ALTIVEC)
17516 ret = altivec_expand_builtin (exp, target, &success);
17518 if (success)
17519 return ret;
17521 if (TARGET_SPE)
17523 ret = spe_expand_builtin (exp, target, &success);
17525 if (success)
17526 return ret;
17528 if (TARGET_PAIRED_FLOAT)
17530 ret = paired_expand_builtin (exp, target, &success);
17532 if (success)
17533 return ret;
17535 if (TARGET_HTM)
17537 ret = htm_expand_builtin (exp, target, &success);
17539 if (success)
17540 return ret;
17543 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
17544 /* RS6000_BTC_SPECIAL represents no-operand operators. */
17545 gcc_assert (attr == RS6000_BTC_UNARY
17546 || attr == RS6000_BTC_BINARY
17547 || attr == RS6000_BTC_TERNARY
17548 || attr == RS6000_BTC_SPECIAL);
17550 /* Handle simple unary operations. */
17551 d = bdesc_1arg;
17552 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17553 if (d->code == fcode)
17554 return rs6000_expand_unop_builtin (d->icode, exp, target);
17556 /* Handle simple binary operations. */
17557 d = bdesc_2arg;
17558 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17559 if (d->code == fcode)
17560 return rs6000_expand_binop_builtin (d->icode, exp, target);
17562 /* Handle simple ternary operations. */
17563 d = bdesc_3arg;
17564 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17565 if (d->code == fcode)
17566 return rs6000_expand_ternop_builtin (d->icode, exp, target);
17568 /* Handle simple no-argument operations. */
17569 d = bdesc_0arg;
17570 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17571 if (d->code == fcode)
17572 return rs6000_expand_zeroop_builtin (d->icode, target);
17574 gcc_unreachable ();
17577 /* Create a builtin vector type with a name, taking care not to give
17578 the canonical type a name. */
17580 static tree
17581 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
17583 tree result = build_vector_type (elt_type, num_elts);
17585 /* Copy so we don't give the canonical type a name. */
17586 result = build_variant_type_copy (result);
17588 add_builtin_type (name, result);
17590 return result;
17593 static void
17594 rs6000_init_builtins (void)
17596 tree tdecl;
17597 tree ftype;
17598 machine_mode mode;
17600 if (TARGET_DEBUG_BUILTIN)
17601 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
17602 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
17603 (TARGET_SPE) ? ", spe" : "",
17604 (TARGET_ALTIVEC) ? ", altivec" : "",
17605 (TARGET_VSX) ? ", vsx" : "");
17607 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17608 V2SF_type_node = build_vector_type (float_type_node, 2);
17609 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
17610 : "__vector long long",
17611 intDI_type_node, 2);
17612 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
17613 V4HI_type_node = build_vector_type (intHI_type_node, 4);
17614 V4SI_type_node = rs6000_vector_type ("__vector signed int",
17615 intSI_type_node, 4);
17616 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
17617 V8HI_type_node = rs6000_vector_type ("__vector signed short",
17618 intHI_type_node, 8);
17619 V16QI_type_node = rs6000_vector_type ("__vector signed char",
17620 intQI_type_node, 16);
17622 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
17623 unsigned_intQI_type_node, 16);
17624 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
17625 unsigned_intHI_type_node, 8);
17626 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
17627 unsigned_intSI_type_node, 4);
17628 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17629 ? "__vector unsigned long"
17630 : "__vector unsigned long long",
17631 unsigned_intDI_type_node, 2);
17633 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
17634 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
17635 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
17636 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
17638 const_str_type_node
17639 = build_pointer_type (build_qualified_type (char_type_node,
17640 TYPE_QUAL_CONST));
17642 /* We use V1TI mode as a special container to hold __int128_t items that
17643 must live in VSX registers. */
17644 if (intTI_type_node)
17646 V1TI_type_node = rs6000_vector_type ("__vector __int128",
17647 intTI_type_node, 1);
17648 unsigned_V1TI_type_node
17649 = rs6000_vector_type ("__vector unsigned __int128",
17650 unsigned_intTI_type_node, 1);
17653 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
17654 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
17655 'vector unsigned short'. */
17657 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
17658 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17659 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
17660 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
17661 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17663 long_integer_type_internal_node = long_integer_type_node;
17664 long_unsigned_type_internal_node = long_unsigned_type_node;
17665 long_long_integer_type_internal_node = long_long_integer_type_node;
17666 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
17667 intQI_type_internal_node = intQI_type_node;
17668 uintQI_type_internal_node = unsigned_intQI_type_node;
17669 intHI_type_internal_node = intHI_type_node;
17670 uintHI_type_internal_node = unsigned_intHI_type_node;
17671 intSI_type_internal_node = intSI_type_node;
17672 uintSI_type_internal_node = unsigned_intSI_type_node;
17673 intDI_type_internal_node = intDI_type_node;
17674 uintDI_type_internal_node = unsigned_intDI_type_node;
17675 intTI_type_internal_node = intTI_type_node;
17676 uintTI_type_internal_node = unsigned_intTI_type_node;
17677 float_type_internal_node = float_type_node;
17678 double_type_internal_node = double_type_node;
17679 long_double_type_internal_node = long_double_type_node;
17680 dfloat64_type_internal_node = dfloat64_type_node;
17681 dfloat128_type_internal_node = dfloat128_type_node;
17682 void_type_internal_node = void_type_node;
17684 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17685 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17686 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17687 format that uses a pair of doubles, depending on the switches and
17688 defaults.
17690 We do not enable the actual __float128 keyword unless the user explicitly
17691 asks for it, because the library support is not yet complete.
17693 If we don't support either 128-bit IBM double double or IEEE 128-bit
17694 floating point, we need to make sure the type is non-zero or else the
17695 self-test fails during bootstrap.
17697 We don't register a built-in type for __ibm128 if the type is the same as
17698 long double. Instead we add a #define for __ibm128 in
17699 rs6000_cpu_cpp_builtins to long double. */
17700 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17702 ibm128_float_type_node = make_node (REAL_TYPE);
17703 TYPE_PRECISION (ibm128_float_type_node) = 128;
17704 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17705 layout_type (ibm128_float_type_node);
17707 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17708 "__ibm128");
17710 else
17711 ibm128_float_type_node = long_double_type_node;
17713 if (TARGET_FLOAT128_KEYWORD)
17715 ieee128_float_type_node = float128_type_node;
17716 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17717 "__float128");
17720 else if (TARGET_FLOAT128_TYPE)
17722 ieee128_float_type_node = make_node (REAL_TYPE);
17723 TYPE_PRECISION (ieee128_float_type_node) = 128;
17724 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
17725 layout_type (ieee128_float_type_node);
17727 /* If we are not exporting the __float128/_Float128 keywords, we need a
17728 keyword to get the types created. Use __ieee128 as the dummy
17729 keyword. */
17730 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17731 "__ieee128");
17734 else
17735 ieee128_float_type_node = long_double_type_node;
17737 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17738 tree type node. */
17739 builtin_mode_to_type[QImode][0] = integer_type_node;
17740 builtin_mode_to_type[HImode][0] = integer_type_node;
17741 builtin_mode_to_type[SImode][0] = intSI_type_node;
17742 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17743 builtin_mode_to_type[DImode][0] = intDI_type_node;
17744 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17745 builtin_mode_to_type[TImode][0] = intTI_type_node;
17746 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17747 builtin_mode_to_type[SFmode][0] = float_type_node;
17748 builtin_mode_to_type[DFmode][0] = double_type_node;
17749 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17750 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17751 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17752 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17753 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17754 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17755 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17756 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17757 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17758 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17759 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17760 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17761 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
17762 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17763 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17764 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17765 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17766 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17767 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17768 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
17770 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17771 TYPE_NAME (bool_char_type_node) = tdecl;
17773 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17774 TYPE_NAME (bool_short_type_node) = tdecl;
17776 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17777 TYPE_NAME (bool_int_type_node) = tdecl;
17779 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17780 TYPE_NAME (pixel_type_node) = tdecl;
17782 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17783 bool_char_type_node, 16);
17784 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17785 bool_short_type_node, 8);
17786 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17787 bool_int_type_node, 4);
17788 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17789 ? "__vector __bool long"
17790 : "__vector __bool long long",
17791 bool_long_type_node, 2);
17792 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17793 pixel_type_node, 8);
17795 /* Paired and SPE builtins are only available if the compiler was
17796 configured with the corresponding options, so create them only when
17797 the option is enabled. Create the AltiVec and VSX builtins on machines
17798 with at least the general purpose extensions (970 and newer) to allow
17799 the use of the target attribute. */
17800 if (TARGET_PAIRED_FLOAT)
17801 paired_init_builtins ();
17802 if (TARGET_SPE)
17803 spe_init_builtins ();
17804 if (TARGET_EXTRA_BUILTINS)
17805 altivec_init_builtins ();
17806 if (TARGET_HTM)
17807 htm_init_builtins ();
17809 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
17810 rs6000_common_init_builtins ();
17812 ftype = build_function_type_list (ieee128_float_type_node,
17813 const_str_type_node, NULL_TREE);
17814 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
17815 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
17817 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
17818 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17819 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17821 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17822 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17823 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17825 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17826 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17827 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17829 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17830 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17831 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17833 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17834 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17835 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
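/* Illustrative user-level use of the two reciprocal builtins defined
   above (a sketch; x, y and z are arbitrary locals):

     double q = __builtin_recipdiv (x, y);    approximates x / y
     float  r = __builtin_rsqrtf (z);         approximates 1.0f / sqrtf (z)

   The operand and result modes mirror the builtin_function_type calls
   just above.  */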
17837 mode = (TARGET_64BIT) ? DImode : SImode;
17838 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17839 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17840 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17842 ftype = build_function_type_list (unsigned_intDI_type_node,
17843 NULL_TREE);
17844 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17846 if (TARGET_64BIT)
17847 ftype = build_function_type_list (unsigned_intDI_type_node,
17848 NULL_TREE);
17849 else
17850 ftype = build_function_type_list (unsigned_intSI_type_node,
17851 NULL_TREE);
17852 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
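/* Sketch of how client code might use the time-base builtins defined
   above; the return types follow the ftype chosen for each target:

     unsigned long long start = __builtin_ppc_get_timebase ();
     ... timed work ...
     unsigned long long ticks = __builtin_ppc_get_timebase () - start;

   __builtin_ppc_mftb reads the same register but returns only 32 bits
   on 32-bit targets, as the TARGET_64BIT test above arranges.  */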
17854 ftype = build_function_type_list (double_type_node, NULL_TREE);
17855 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17857 ftype = build_function_type_list (void_type_node,
17858 intSI_type_node, double_type_node,
17859 NULL_TREE);
17860 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17862 ftype = build_function_type_list (void_type_node, NULL_TREE);
17863 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17865 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17866 NULL_TREE);
17867 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17868 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
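/* Sketch of the intended user-level protocol for the CPU builtins just
   defined (feature strings per the GCC documentation; the called
   helpers are placeholders):

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       use_power9_code ();
     else if (__builtin_cpu_supports ("vsx"))
       use_vsx_code ();

   __builtin_cpu_init only needs to be called explicitly from code that
   runs before the static constructors do.  */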
17870 /* AIX libm provides clog as __clog. */
17871 if (TARGET_XCOFF &&
17872 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17873 set_user_assembler_name (tdecl, "__clog");
17875 #ifdef SUBTARGET_INIT_BUILTINS
17876 SUBTARGET_INIT_BUILTINS;
17877 #endif
17880 /* Returns the rs6000 builtin decl for CODE. */
17882 static tree
17883 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17885 HOST_WIDE_INT fnmask;
17887 if (code >= RS6000_BUILTIN_COUNT)
17888 return error_mark_node;
17890 fnmask = rs6000_builtin_info[code].mask;
17891 if ((fnmask & rs6000_builtin_mask) != fnmask)
17893 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17894 return error_mark_node;
17897 return rs6000_builtin_decls[code];
17900 static void
17901 spe_init_builtins (void)
17903 tree puint_type_node = build_pointer_type (unsigned_type_node);
17904 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
17905 const struct builtin_description *d;
17906 size_t i;
17907 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17909 tree v2si_ftype_4_v2si
17910 = build_function_type_list (opaque_V2SI_type_node,
17911 opaque_V2SI_type_node,
17912 opaque_V2SI_type_node,
17913 opaque_V2SI_type_node,
17914 opaque_V2SI_type_node,
17915 NULL_TREE);
17917 tree v2sf_ftype_4_v2sf
17918 = build_function_type_list (opaque_V2SF_type_node,
17919 opaque_V2SF_type_node,
17920 opaque_V2SF_type_node,
17921 opaque_V2SF_type_node,
17922 opaque_V2SF_type_node,
17923 NULL_TREE);
17925 tree int_ftype_int_v2si_v2si
17926 = build_function_type_list (integer_type_node,
17927 integer_type_node,
17928 opaque_V2SI_type_node,
17929 opaque_V2SI_type_node,
17930 NULL_TREE);
17932 tree int_ftype_int_v2sf_v2sf
17933 = build_function_type_list (integer_type_node,
17934 integer_type_node,
17935 opaque_V2SF_type_node,
17936 opaque_V2SF_type_node,
17937 NULL_TREE);
17939 tree void_ftype_v2si_puint_int
17940 = build_function_type_list (void_type_node,
17941 opaque_V2SI_type_node,
17942 puint_type_node,
17943 integer_type_node,
17944 NULL_TREE);
17946 tree void_ftype_v2si_puint_char
17947 = build_function_type_list (void_type_node,
17948 opaque_V2SI_type_node,
17949 puint_type_node,
17950 char_type_node,
17951 NULL_TREE);
17953 tree void_ftype_v2si_pv2si_int
17954 = build_function_type_list (void_type_node,
17955 opaque_V2SI_type_node,
17956 opaque_p_V2SI_type_node,
17957 integer_type_node,
17958 NULL_TREE);
17960 tree void_ftype_v2si_pv2si_char
17961 = build_function_type_list (void_type_node,
17962 opaque_V2SI_type_node,
17963 opaque_p_V2SI_type_node,
17964 char_type_node,
17965 NULL_TREE);
17967 tree void_ftype_int
17968 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17970 tree int_ftype_void
17971 = build_function_type_list (integer_type_node, NULL_TREE);
17973 tree v2si_ftype_pv2si_int
17974 = build_function_type_list (opaque_V2SI_type_node,
17975 opaque_p_V2SI_type_node,
17976 integer_type_node,
17977 NULL_TREE);
17979 tree v2si_ftype_puint_int
17980 = build_function_type_list (opaque_V2SI_type_node,
17981 puint_type_node,
17982 integer_type_node,
17983 NULL_TREE);
17985 tree v2si_ftype_pushort_int
17986 = build_function_type_list (opaque_V2SI_type_node,
17987 pushort_type_node,
17988 integer_type_node,
17989 NULL_TREE);
17991 tree v2si_ftype_signed_char
17992 = build_function_type_list (opaque_V2SI_type_node,
17993 signed_char_type_node,
17994 NULL_TREE);
17996 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
17998 /* Initialize irregular SPE builtins. */
18000 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
18001 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
18002 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
18003 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
18004 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
18005 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
18006 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
18007 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
18008 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
18009 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
18010 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
18011 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
18012 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
18013 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
18014 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
18015 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
18016 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
18017 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
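/* Given the signatures above, a user-level splat looks like the
   following sketch; the argument must fit the instruction's 5-bit
   signed immediate:

     __ev64_opaque__ v = __builtin_spe_evsplati (5);

   which replicates the value into both 32-bit halves of the vector.  */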
18019 /* Loads. */
18020 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
18021 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
18022 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
18023 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
18024 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
18025 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
18026 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
18027 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
18028 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
18029 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
18030 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
18031 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
18032 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
18033 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
18034 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
18035 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
18036 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
18037 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
18038 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
18039 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
18040 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
18041 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
18043 /* Predicates. */
18044 d = bdesc_spe_predicates;
18045 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
18047 tree type;
18048 HOST_WIDE_INT mask = d->mask;
18050 if ((mask & builtin_mask) != mask)
18052 if (TARGET_DEBUG_BUILTIN)
18053 fprintf (stderr, "spe_init_builtins, skip predicate %s\n",
18054 d->name);
18055 continue;
18058 /* Cannot define builtin if the instruction is disabled. */
18059 gcc_assert (d->icode != CODE_FOR_nothing);
18060 switch (insn_data[d->icode].operand[1].mode)
18062 case V2SImode:
18063 type = int_ftype_int_v2si_v2si;
18064 break;
18065 case V2SFmode:
18066 type = int_ftype_int_v2sf_v2sf;
18067 break;
18068 default:
18069 gcc_unreachable ();
18072 def_builtin (d->name, type, d->code);
18075 /* Evsel predicates. */
18076 d = bdesc_spe_evsel;
18077 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
18079 tree type;
18080 HOST_WIDE_INT mask = d->mask;
18082 if ((mask & builtin_mask) != mask)
18084 if (TARGET_DEBUG_BUILTIN)
18085 fprintf (stderr, "spe_init_builtins, skip evsel %s\n",
18086 d->name);
18087 continue;
18090 /* Cannot define builtin if the instruction is disabled. */
18091 gcc_assert (d->icode != CODE_FOR_nothing);
18092 switch (insn_data[d->icode].operand[1].mode)
18094 case V2SImode:
18095 type = v2si_ftype_4_v2si;
18096 break;
18097 case V2SFmode:
18098 type = v2sf_ftype_4_v2sf;
18099 break;
18100 default:
18101 gcc_unreachable ();
18104 def_builtin (d->name, type, d->code);
18108 static void
18109 paired_init_builtins (void)
18111 const struct builtin_description *d;
18112 size_t i;
18113 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18115 tree int_ftype_int_v2sf_v2sf
18116 = build_function_type_list (integer_type_node,
18117 integer_type_node,
18118 V2SF_type_node,
18119 V2SF_type_node,
18120 NULL_TREE);
18121 tree pcfloat_type_node =
18122 build_pointer_type (build_qualified_type
18123 (float_type_node, TYPE_QUAL_CONST));
18125 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
18126 long_integer_type_node,
18127 pcfloat_type_node,
18128 NULL_TREE);
18129 tree void_ftype_v2sf_long_pcfloat =
18130 build_function_type_list (void_type_node,
18131 V2SF_type_node,
18132 long_integer_type_node,
18133 pcfloat_type_node,
18134 NULL_TREE);
18137 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
18138 PAIRED_BUILTIN_LX);
18141 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
18142 PAIRED_BUILTIN_STX);
18144 /* Predicates. */
18145 d = bdesc_paired_preds;
18146 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
18148 tree type;
18149 HOST_WIDE_INT mask = d->mask;
18151 if ((mask & builtin_mask) != mask)
18153 if (TARGET_DEBUG_BUILTIN)
18154 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
18155 d->name);
18156 continue;
18159 /* Cannot define builtin if the instruction is disabled. */
18160 gcc_assert (d->icode != CODE_FOR_nothing);
18162 if (TARGET_DEBUG_BUILTIN)
18163 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
18164 (int)i, get_insn_name (d->icode), (int)d->icode,
18165 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
18167 switch (insn_data[d->icode].operand[1].mode)
18169 case V2SFmode:
18170 type = int_ftype_int_v2sf_v2sf;
18171 break;
18172 default:
18173 gcc_unreachable ();
18176 def_builtin (d->name, type, d->code);
18180 static void
18181 altivec_init_builtins (void)
18183 const struct builtin_description *d;
18184 size_t i;
18185 tree ftype;
18186 tree decl;
18187 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18189 tree pvoid_type_node = build_pointer_type (void_type_node);
18191 tree pcvoid_type_node
18192 = build_pointer_type (build_qualified_type (void_type_node,
18193 TYPE_QUAL_CONST));
18195 tree int_ftype_opaque
18196 = build_function_type_list (integer_type_node,
18197 opaque_V4SI_type_node, NULL_TREE);
18198 tree opaque_ftype_opaque
18199 = build_function_type_list (integer_type_node, NULL_TREE);
18200 tree opaque_ftype_opaque_int
18201 = build_function_type_list (opaque_V4SI_type_node,
18202 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
18203 tree opaque_ftype_opaque_opaque_int
18204 = build_function_type_list (opaque_V4SI_type_node,
18205 opaque_V4SI_type_node, opaque_V4SI_type_node,
18206 integer_type_node, NULL_TREE);
18207 tree opaque_ftype_opaque_opaque_opaque
18208 = build_function_type_list (opaque_V4SI_type_node,
18209 opaque_V4SI_type_node, opaque_V4SI_type_node,
18210 opaque_V4SI_type_node, NULL_TREE);
18211 tree opaque_ftype_opaque_opaque
18212 = build_function_type_list (opaque_V4SI_type_node,
18213 opaque_V4SI_type_node, opaque_V4SI_type_node,
18214 NULL_TREE);
18215 tree int_ftype_int_opaque_opaque
18216 = build_function_type_list (integer_type_node,
18217 integer_type_node, opaque_V4SI_type_node,
18218 opaque_V4SI_type_node, NULL_TREE);
18219 tree int_ftype_int_v4si_v4si
18220 = build_function_type_list (integer_type_node,
18221 integer_type_node, V4SI_type_node,
18222 V4SI_type_node, NULL_TREE);
18223 tree int_ftype_int_v2di_v2di
18224 = build_function_type_list (integer_type_node,
18225 integer_type_node, V2DI_type_node,
18226 V2DI_type_node, NULL_TREE);
18227 tree void_ftype_v4si
18228 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
18229 tree v8hi_ftype_void
18230 = build_function_type_list (V8HI_type_node, NULL_TREE);
18231 tree void_ftype_void
18232 = build_function_type_list (void_type_node, NULL_TREE);
18233 tree void_ftype_int
18234 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
18236 tree opaque_ftype_long_pcvoid
18237 = build_function_type_list (opaque_V4SI_type_node,
18238 long_integer_type_node, pcvoid_type_node,
18239 NULL_TREE);
18240 tree v16qi_ftype_long_pcvoid
18241 = build_function_type_list (V16QI_type_node,
18242 long_integer_type_node, pcvoid_type_node,
18243 NULL_TREE);
18244 tree v8hi_ftype_long_pcvoid
18245 = build_function_type_list (V8HI_type_node,
18246 long_integer_type_node, pcvoid_type_node,
18247 NULL_TREE);
18248 tree v4si_ftype_long_pcvoid
18249 = build_function_type_list (V4SI_type_node,
18250 long_integer_type_node, pcvoid_type_node,
18251 NULL_TREE);
18252 tree v4sf_ftype_long_pcvoid
18253 = build_function_type_list (V4SF_type_node,
18254 long_integer_type_node, pcvoid_type_node,
18255 NULL_TREE);
18256 tree v2df_ftype_long_pcvoid
18257 = build_function_type_list (V2DF_type_node,
18258 long_integer_type_node, pcvoid_type_node,
18259 NULL_TREE);
18260 tree v2di_ftype_long_pcvoid
18261 = build_function_type_list (V2DI_type_node,
18262 long_integer_type_node, pcvoid_type_node,
18263 NULL_TREE);
18265 tree void_ftype_opaque_long_pvoid
18266 = build_function_type_list (void_type_node,
18267 opaque_V4SI_type_node, long_integer_type_node,
18268 pvoid_type_node, NULL_TREE);
18269 tree void_ftype_v4si_long_pvoid
18270 = build_function_type_list (void_type_node,
18271 V4SI_type_node, long_integer_type_node,
18272 pvoid_type_node, NULL_TREE);
18273 tree void_ftype_v16qi_long_pvoid
18274 = build_function_type_list (void_type_node,
18275 V16QI_type_node, long_integer_type_node,
18276 pvoid_type_node, NULL_TREE);
18278 tree void_ftype_v16qi_pvoid_long
18279 = build_function_type_list (void_type_node,
18280 V16QI_type_node, pvoid_type_node,
18281 long_integer_type_node, NULL_TREE);
18283 tree void_ftype_v8hi_long_pvoid
18284 = build_function_type_list (void_type_node,
18285 V8HI_type_node, long_integer_type_node,
18286 pvoid_type_node, NULL_TREE);
18287 tree void_ftype_v4sf_long_pvoid
18288 = build_function_type_list (void_type_node,
18289 V4SF_type_node, long_integer_type_node,
18290 pvoid_type_node, NULL_TREE);
18291 tree void_ftype_v2df_long_pvoid
18292 = build_function_type_list (void_type_node,
18293 V2DF_type_node, long_integer_type_node,
18294 pvoid_type_node, NULL_TREE);
18295 tree void_ftype_v2di_long_pvoid
18296 = build_function_type_list (void_type_node,
18297 V2DI_type_node, long_integer_type_node,
18298 pvoid_type_node, NULL_TREE);
18299 tree int_ftype_int_v8hi_v8hi
18300 = build_function_type_list (integer_type_node,
18301 integer_type_node, V8HI_type_node,
18302 V8HI_type_node, NULL_TREE);
18303 tree int_ftype_int_v16qi_v16qi
18304 = build_function_type_list (integer_type_node,
18305 integer_type_node, V16QI_type_node,
18306 V16QI_type_node, NULL_TREE);
18307 tree int_ftype_int_v4sf_v4sf
18308 = build_function_type_list (integer_type_node,
18309 integer_type_node, V4SF_type_node,
18310 V4SF_type_node, NULL_TREE);
18311 tree int_ftype_int_v2df_v2df
18312 = build_function_type_list (integer_type_node,
18313 integer_type_node, V2DF_type_node,
18314 V2DF_type_node, NULL_TREE);
18315 tree v2di_ftype_v2di
18316 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
18317 tree v4si_ftype_v4si
18318 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
18319 tree v8hi_ftype_v8hi
18320 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
18321 tree v16qi_ftype_v16qi
18322 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
18323 tree v4sf_ftype_v4sf
18324 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
18325 tree v2df_ftype_v2df
18326 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
18327 tree void_ftype_pcvoid_int_int
18328 = build_function_type_list (void_type_node,
18329 pcvoid_type_node, integer_type_node,
18330 integer_type_node, NULL_TREE);
18332 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
18333 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
18334 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
18335 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
18336 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
18337 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
18338 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
18339 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
18340 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
18341 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
18342 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
18343 ALTIVEC_BUILTIN_LVXL_V2DF);
18344 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
18345 ALTIVEC_BUILTIN_LVXL_V2DI);
18346 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
18347 ALTIVEC_BUILTIN_LVXL_V4SF);
18348 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
18349 ALTIVEC_BUILTIN_LVXL_V4SI);
18350 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
18351 ALTIVEC_BUILTIN_LVXL_V8HI);
18352 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
18353 ALTIVEC_BUILTIN_LVXL_V16QI);
18354 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
18355 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
18356 ALTIVEC_BUILTIN_LVX_V2DF);
18357 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
18358 ALTIVEC_BUILTIN_LVX_V2DI);
18359 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
18360 ALTIVEC_BUILTIN_LVX_V4SF);
18361 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
18362 ALTIVEC_BUILTIN_LVX_V4SI);
18363 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
18364 ALTIVEC_BUILTIN_LVX_V8HI);
18365 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
18366 ALTIVEC_BUILTIN_LVX_V16QI);
18367 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
18368 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
18369 ALTIVEC_BUILTIN_STVX_V2DF);
18370 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
18371 ALTIVEC_BUILTIN_STVX_V2DI);
18372 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
18373 ALTIVEC_BUILTIN_STVX_V4SF);
18374 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
18375 ALTIVEC_BUILTIN_STVX_V4SI);
18376 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
18377 ALTIVEC_BUILTIN_STVX_V8HI);
18378 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
18379 ALTIVEC_BUILTIN_STVX_V16QI);
18380 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
18381 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
18382 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
18383 ALTIVEC_BUILTIN_STVXL_V2DF);
18384 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
18385 ALTIVEC_BUILTIN_STVXL_V2DI);
18386 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
18387 ALTIVEC_BUILTIN_STVXL_V4SF);
18388 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
18389 ALTIVEC_BUILTIN_STVXL_V4SI);
18390 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
18391 ALTIVEC_BUILTIN_STVXL_V8HI);
18392 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
18393 ALTIVEC_BUILTIN_STVXL_V16QI);
18394 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
18395 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
18396 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
18397 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
18398 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
18399 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
18400 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
18401 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
18402 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
18403 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
18404 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
18405 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
18406 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
18407 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
18408 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
18409 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
18411 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
18412 VSX_BUILTIN_LXVD2X_V2DF);
18413 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
18414 VSX_BUILTIN_LXVD2X_V2DI);
18415 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
18416 VSX_BUILTIN_LXVW4X_V4SF);
18417 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
18418 VSX_BUILTIN_LXVW4X_V4SI);
18419 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
18420 VSX_BUILTIN_LXVW4X_V8HI);
18421 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
18422 VSX_BUILTIN_LXVW4X_V16QI);
18423 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
18424 VSX_BUILTIN_STXVD2X_V2DF);
18425 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
18426 VSX_BUILTIN_STXVD2X_V2DI);
18427 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
18428 VSX_BUILTIN_STXVW4X_V4SF);
18429 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
18430 VSX_BUILTIN_STXVW4X_V4SI);
18431 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
18432 VSX_BUILTIN_STXVW4X_V8HI);
18433 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
18434 VSX_BUILTIN_STXVW4X_V16QI);
18436 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
18437 VSX_BUILTIN_LD_ELEMREV_V2DF);
18438 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
18439 VSX_BUILTIN_LD_ELEMREV_V2DI);
18440 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
18441 VSX_BUILTIN_LD_ELEMREV_V4SF);
18442 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
18443 VSX_BUILTIN_LD_ELEMREV_V4SI);
18444 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
18445 VSX_BUILTIN_ST_ELEMREV_V2DF);
18446 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
18447 VSX_BUILTIN_ST_ELEMREV_V2DI);
18448 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
18449 VSX_BUILTIN_ST_ELEMREV_V4SF);
18450 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
18451 VSX_BUILTIN_ST_ELEMREV_V4SI);
18453 if (TARGET_P9_VECTOR)
18455 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
18456 VSX_BUILTIN_LD_ELEMREV_V8HI);
18457 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
18458 VSX_BUILTIN_LD_ELEMREV_V16QI);
18459 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
18460 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
18461 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
18462 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
18464 else
18466 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
18467 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
18468 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
18469 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
18470 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
18471 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
18472 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
18473 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
18476 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
18477 VSX_BUILTIN_VEC_LD);
18478 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
18479 VSX_BUILTIN_VEC_ST);
18480 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
18481 VSX_BUILTIN_VEC_XL);
18482 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
18483 VSX_BUILTIN_VEC_XST);
18485 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
18486 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
18487 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
18489 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
18490 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
18491 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
18492 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
18493 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
18494 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
18495 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
18496 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
18497 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
18498 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
18499 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
18500 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
18502 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
18503 ALTIVEC_BUILTIN_VEC_ADDE);
18504 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
18505 ALTIVEC_BUILTIN_VEC_ADDEC);
18506 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
18507 ALTIVEC_BUILTIN_VEC_CMPNE);
18508 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
18509 ALTIVEC_BUILTIN_VEC_MUL);
18511 /* Cell builtins. */
18512 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
18513 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
18514 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
18515 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
18517 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
18518 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
18519 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
18520 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
18522 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
18523 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
18524 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
18525 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
18527 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
18528 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
18529 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
18530 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
18532 if (TARGET_P9_VECTOR)
18533 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
18534 P9V_BUILTIN_STXVL);
18536 /* Add the DST variants. */
18537 d = bdesc_dst;
18538 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
18540 HOST_WIDE_INT mask = d->mask;
18542 /* It is expected that these dst built-in functions may have
18543 d->icode equal to CODE_FOR_nothing. */
18544 if ((mask & builtin_mask) != mask)
18546 if (TARGET_DEBUG_BUILTIN)
18547 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
18548 d->name);
18549 continue;
18551 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
18554 /* Initialize the predicates. */
18555 d = bdesc_altivec_preds;
18556 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
18558 machine_mode mode1;
18559 tree type;
18560 HOST_WIDE_INT mask = d->mask;
18562 if ((mask & builtin_mask) != mask)
18564 if (TARGET_DEBUG_BUILTIN)
18565 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
18566 d->name);
18567 continue;
18570 if (rs6000_overloaded_builtin_p (d->code))
18571 mode1 = VOIDmode;
18572 else
18574 /* Cannot define builtin if the instruction is disabled. */
18575 gcc_assert (d->icode != CODE_FOR_nothing);
18576 mode1 = insn_data[d->icode].operand[1].mode;
18579 switch (mode1)
18581 case VOIDmode:
18582 type = int_ftype_int_opaque_opaque;
18583 break;
18584 case V2DImode:
18585 type = int_ftype_int_v2di_v2di;
18586 break;
18587 case V4SImode:
18588 type = int_ftype_int_v4si_v4si;
18589 break;
18590 case V8HImode:
18591 type = int_ftype_int_v8hi_v8hi;
18592 break;
18593 case V16QImode:
18594 type = int_ftype_int_v16qi_v16qi;
18595 break;
18596 case V4SFmode:
18597 type = int_ftype_int_v4sf_v4sf;
18598 break;
18599 case V2DFmode:
18600 type = int_ftype_int_v2df_v2df;
18601 break;
18602 default:
18603 gcc_unreachable ();
18606 def_builtin (d->name, type, d->code);
18609 /* Initialize the abs* operators. */
18610 d = bdesc_abs;
18611 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
18613 machine_mode mode0;
18614 tree type;
18615 HOST_WIDE_INT mask = d->mask;
18617 if ((mask & builtin_mask) != mask)
18619 if (TARGET_DEBUG_BUILTIN)
18620 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
18621 d->name);
18622 continue;
18625 /* Cannot define builtin if the instruction is disabled. */
18626 gcc_assert (d->icode != CODE_FOR_nothing);
18627 mode0 = insn_data[d->icode].operand[0].mode;
18629 switch (mode0)
18631 case V2DImode:
18632 type = v2di_ftype_v2di;
18633 break;
18634 case V4SImode:
18635 type = v4si_ftype_v4si;
18636 break;
18637 case V8HImode:
18638 type = v8hi_ftype_v8hi;
18639 break;
18640 case V16QImode:
18641 type = v16qi_ftype_v16qi;
18642 break;
18643 case V4SFmode:
18644 type = v4sf_ftype_v4sf;
18645 break;
18646 case V2DFmode:
18647 type = v2df_ftype_v2df;
18648 break;
18649 default:
18650 gcc_unreachable ();
18653 def_builtin (d->name, type, d->code);
18656 /* Initialize target builtin that implements
18657 targetm.vectorize.builtin_mask_for_load. */
18659 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
18660 v16qi_ftype_long_pcvoid,
18661 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
18662 BUILT_IN_MD, NULL, NULL_TREE);
18663 TREE_READONLY (decl) = 1;
18664 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
18665 altivec_builtin_mask_for_load = decl;
18667 /* Access to the vec_init patterns. */
18668 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
18669 integer_type_node, integer_type_node,
18670 integer_type_node, NULL_TREE);
18671 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
18673 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
18674 short_integer_type_node,
18675 short_integer_type_node,
18676 short_integer_type_node,
18677 short_integer_type_node,
18678 short_integer_type_node,
18679 short_integer_type_node,
18680 short_integer_type_node, NULL_TREE);
18681 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
18683 ftype = build_function_type_list (V16QI_type_node, char_type_node,
18684 char_type_node, char_type_node,
18685 char_type_node, char_type_node,
18686 char_type_node, char_type_node,
18687 char_type_node, char_type_node,
18688 char_type_node, char_type_node,
18689 char_type_node, char_type_node,
18690 char_type_node, char_type_node,
18691 char_type_node, NULL_TREE);
18692 def_builtin ("__builtin_vec_init_v16qi", ftype,
18693 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
18695 ftype = build_function_type_list (V4SF_type_node, float_type_node,
18696 float_type_node, float_type_node,
18697 float_type_node, NULL_TREE);
18698 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
18700 /* VSX builtins. */
18701 ftype = build_function_type_list (V2DF_type_node, double_type_node,
18702 double_type_node, NULL_TREE);
18703 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
18705 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
18706 intDI_type_node, NULL_TREE);
18707 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
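/* These vec_init builtins assemble a vector from scalar elements; as a
   sketch, __builtin_vec_init_v4si (a, b, c, d) yields a V4SI holding
   the four ints, and __builtin_vec_init_v2df (x, y) the analogous
   V2DF.  */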
18709 /* Access to the vec_set patterns. */
18710 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
18711 intSI_type_node,
18712 integer_type_node, NULL_TREE);
18713 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
18715 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
18716 intHI_type_node,
18717 integer_type_node, NULL_TREE);
18718 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
18720 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
18721 intQI_type_node,
18722 integer_type_node, NULL_TREE);
18723 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
18725 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
18726 float_type_node,
18727 integer_type_node, NULL_TREE);
18728 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
18730 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
18731 double_type_node,
18732 integer_type_node, NULL_TREE);
18733 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
18735 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
18736 intDI_type_node,
18737 integer_type_node, NULL_TREE);
18738 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
18740 /* Access to the vec_extract patterns. */
18741 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
18742 integer_type_node, NULL_TREE);
18743 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
18745 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
18746 integer_type_node, NULL_TREE);
18747 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
18749 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
18750 integer_type_node, NULL_TREE);
18751 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
18753 ftype = build_function_type_list (float_type_node, V4SF_type_node,
18754 integer_type_node, NULL_TREE);
18755 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
18757 ftype = build_function_type_list (double_type_node, V2DF_type_node,
18758 integer_type_node, NULL_TREE);
18759 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
18761 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
18762 integer_type_node, NULL_TREE);
18763 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
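/* Sketch of the set/extract pairs defined above; the final integer
   argument selects the element:

     vector int w = __builtin_vec_set_v4si (v, 99, 0);    element 0 := 99
     int e = __builtin_vec_ext_v4si (v, 3);               read element 3  */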
18766 if (V1TI_type_node)
18768 tree v1ti_ftype_long_pcvoid
18769 = build_function_type_list (V1TI_type_node,
18770 long_integer_type_node, pcvoid_type_node,
18771 NULL_TREE);
18772 tree void_ftype_v1ti_long_pvoid
18773 = build_function_type_list (void_type_node,
18774 V1TI_type_node, long_integer_type_node,
18775 pvoid_type_node, NULL_TREE);
18776 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
18777 VSX_BUILTIN_LXVD2X_V1TI);
18778 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
18779 VSX_BUILTIN_STXVD2X_V1TI);
18780 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
18781 NULL_TREE);
18782 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
18783 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
18784 intTI_type_node,
18785 integer_type_node, NULL_TREE);
18786 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
18787 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
18788 integer_type_node, NULL_TREE);
18789 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
18794 static void
18795 htm_init_builtins (void)
18797 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18798 const struct builtin_description *d;
18799 size_t i;
18801 d = bdesc_htm;
18802 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
18804 tree op[MAX_HTM_OPERANDS], type;
18805 HOST_WIDE_INT mask = d->mask;
18806 unsigned attr = rs6000_builtin_info[d->code].attr;
18807 bool void_func = (attr & RS6000_BTC_VOID);
18808 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
18809 int nopnds = 0;
18810 tree gpr_type_node;
18811 tree rettype;
18812 tree argtype;
18814 /* It is expected that these htm built-in functions may have
18815 d->icode equal to CODE_FOR_nothing. */
18817 if (TARGET_32BIT && TARGET_POWERPC64)
18818 gpr_type_node = long_long_unsigned_type_node;
18819 else
18820 gpr_type_node = long_unsigned_type_node;
18822 if (attr & RS6000_BTC_SPR)
18824 rettype = gpr_type_node;
18825 argtype = gpr_type_node;
18827 else if (d->code == HTM_BUILTIN_TABORTDC
18828 || d->code == HTM_BUILTIN_TABORTDCI)
18830 rettype = unsigned_type_node;
18831 argtype = gpr_type_node;
18833 else
18835 rettype = unsigned_type_node;
18836 argtype = unsigned_type_node;
18839 if ((mask & builtin_mask) != mask)
18841 if (TARGET_DEBUG_BUILTIN)
18842 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
18843 continue;
18846 if (d->name == 0)
18848 if (TARGET_DEBUG_BUILTIN)
18849 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
18850 (long unsigned) i);
18851 continue;
18854 op[nopnds++] = (void_func) ? void_type_node : rettype;
18856 if (attr_args == RS6000_BTC_UNARY)
18857 op[nopnds++] = argtype;
18858 else if (attr_args == RS6000_BTC_BINARY)
18860 op[nopnds++] = argtype;
18861 op[nopnds++] = argtype;
18863 else if (attr_args == RS6000_BTC_TERNARY)
18865 op[nopnds++] = argtype;
18866 op[nopnds++] = argtype;
18867 op[nopnds++] = argtype;
18870 switch (nopnds)
18872 case 1:
18873 type = build_function_type_list (op[0], NULL_TREE);
18874 break;
18875 case 2:
18876 type = build_function_type_list (op[0], op[1], NULL_TREE);
18877 break;
18878 case 3:
18879 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
18880 break;
18881 case 4:
18882 type = build_function_type_list (op[0], op[1], op[2], op[3],
18883 NULL_TREE);
18884 break;
18885 default:
18886 gcc_unreachable ();
18889 def_builtin (d->name, type, d->code);
18893 /* Hash function for builtin functions with up to 3 arguments and a return
18894 type. */
18895 hashval_t
18896 builtin_hasher::hash (builtin_hash_struct *bh)
18898 unsigned ret = 0;
18899 int i;
18901 for (i = 0; i < 4; i++)
18903 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
18904 ret = (ret * 2) + bh->uns_p[i];
18907 return ret;
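/* The loop above treats the (mode, uns_p) pairs as digits of a
   mixed-radix number, i.e. roughly

     ret = (((mode[0] * 2 + uns_p[0]) * MAX_MACHINE_MODE + mode[1]) * 2
	    + uns_p[1]) ...

   so, up to 32-bit overflow, two signatures hash equal only when every
   mode and signedness flag matches.  */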
18910 /* Compare builtin hash entries H1 and H2 for equivalence. */
18911 bool
18912 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18914 return ((p1->mode[0] == p2->mode[0])
18915 && (p1->mode[1] == p2->mode[1])
18916 && (p1->mode[2] == p2->mode[2])
18917 && (p1->mode[3] == p2->mode[3])
18918 && (p1->uns_p[0] == p2->uns_p[0])
18919 && (p1->uns_p[1] == p2->uns_p[1])
18920 && (p1->uns_p[2] == p2->uns_p[2])
18921 && (p1->uns_p[3] == p2->uns_p[3]));
18924 /* Map types for builtin functions with an explicit return type and up to 3
18925 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
18926 of the unused arguments. */
18927 static tree
18928 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18929 machine_mode mode_arg1, machine_mode mode_arg2,
18930 enum rs6000_builtins builtin, const char *name)
18932 struct builtin_hash_struct h;
18933 struct builtin_hash_struct *h2;
18934 int num_args = 3;
18935 int i;
18936 tree ret_type = NULL_TREE;
18937 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18939 /* Create builtin_hash_table. */
18940 if (builtin_hash_table == NULL)
18941 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18943 h.type = NULL_TREE;
18944 h.mode[0] = mode_ret;
18945 h.mode[1] = mode_arg0;
18946 h.mode[2] = mode_arg1;
18947 h.mode[3] = mode_arg2;
18948 h.uns_p[0] = 0;
18949 h.uns_p[1] = 0;
18950 h.uns_p[2] = 0;
18951 h.uns_p[3] = 0;
18953 /* If the builtin is a type that produces unsigned results or takes unsigned
18954 arguments, and it is returned as a decl for the vectorizer (such as
18955 widening multiplies, permute), make sure the arguments and return value
18956 are type correct. */
18957 switch (builtin)
18959 /* unsigned 1 argument functions. */
18960 case CRYPTO_BUILTIN_VSBOX:
18961 case P8V_BUILTIN_VGBBD:
18962 case MISC_BUILTIN_CDTBCD:
18963 case MISC_BUILTIN_CBCDTD:
18964 h.uns_p[0] = 1;
18965 h.uns_p[1] = 1;
18966 break;
18968 /* unsigned 2 argument functions. */
18969 case ALTIVEC_BUILTIN_VMULEUB:
18970 case ALTIVEC_BUILTIN_VMULEUH:
18971 case ALTIVEC_BUILTIN_VMULOUB:
18972 case ALTIVEC_BUILTIN_VMULOUH:
18973 case CRYPTO_BUILTIN_VCIPHER:
18974 case CRYPTO_BUILTIN_VCIPHERLAST:
18975 case CRYPTO_BUILTIN_VNCIPHER:
18976 case CRYPTO_BUILTIN_VNCIPHERLAST:
18977 case CRYPTO_BUILTIN_VPMSUMB:
18978 case CRYPTO_BUILTIN_VPMSUMH:
18979 case CRYPTO_BUILTIN_VPMSUMW:
18980 case CRYPTO_BUILTIN_VPMSUMD:
18981 case CRYPTO_BUILTIN_VPMSUM:
18982 case MISC_BUILTIN_ADDG6S:
18983 case MISC_BUILTIN_DIVWEU:
18984 case MISC_BUILTIN_DIVWEUO:
18985 case MISC_BUILTIN_DIVDEU:
18986 case MISC_BUILTIN_DIVDEUO:
18987 case VSX_BUILTIN_UDIV_V2DI:
18988 h.uns_p[0] = 1;
18989 h.uns_p[1] = 1;
18990 h.uns_p[2] = 1;
18991 break;
18993 /* unsigned 3 argument functions. */
18994 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18995 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18996 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18997 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18998 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18999 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
19000 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
19001 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
19002 case VSX_BUILTIN_VPERM_16QI_UNS:
19003 case VSX_BUILTIN_VPERM_8HI_UNS:
19004 case VSX_BUILTIN_VPERM_4SI_UNS:
19005 case VSX_BUILTIN_VPERM_2DI_UNS:
19006 case VSX_BUILTIN_XXSEL_16QI_UNS:
19007 case VSX_BUILTIN_XXSEL_8HI_UNS:
19008 case VSX_BUILTIN_XXSEL_4SI_UNS:
19009 case VSX_BUILTIN_XXSEL_2DI_UNS:
19010 case CRYPTO_BUILTIN_VPERMXOR:
19011 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
19012 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
19013 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
19014 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
19015 case CRYPTO_BUILTIN_VSHASIGMAW:
19016 case CRYPTO_BUILTIN_VSHASIGMAD:
19017 case CRYPTO_BUILTIN_VSHASIGMA:
19018 h.uns_p[0] = 1;
19019 h.uns_p[1] = 1;
19020 h.uns_p[2] = 1;
19021 h.uns_p[3] = 1;
19022 break;
19024 /* signed permute functions with unsigned char mask. */
19025 case ALTIVEC_BUILTIN_VPERM_16QI:
19026 case ALTIVEC_BUILTIN_VPERM_8HI:
19027 case ALTIVEC_BUILTIN_VPERM_4SI:
19028 case ALTIVEC_BUILTIN_VPERM_4SF:
19029 case ALTIVEC_BUILTIN_VPERM_2DI:
19030 case ALTIVEC_BUILTIN_VPERM_2DF:
19031 case VSX_BUILTIN_VPERM_16QI:
19032 case VSX_BUILTIN_VPERM_8HI:
19033 case VSX_BUILTIN_VPERM_4SI:
19034 case VSX_BUILTIN_VPERM_4SF:
19035 case VSX_BUILTIN_VPERM_2DI:
19036 case VSX_BUILTIN_VPERM_2DF:
19037 h.uns_p[3] = 1;
19038 break;
19040 /* unsigned args, signed return. */
19041 case VSX_BUILTIN_XVCVUXDSP:
19042 case VSX_BUILTIN_XVCVUXDDP_UNS:
19043 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
19044 h.uns_p[1] = 1;
19045 break;
19047 /* signed args, unsigned return. */
19048 case VSX_BUILTIN_XVCVDPUXDS_UNS:
19049 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
19050 case MISC_BUILTIN_UNPACK_TD:
19051 case MISC_BUILTIN_UNPACK_V1TI:
19052 h.uns_p[0] = 1;
19053 break;
19055 /* unsigned arguments for 128-bit pack instructions. */
19056 case MISC_BUILTIN_PACK_TD:
19057 case MISC_BUILTIN_PACK_V1TI:
19058 h.uns_p[1] = 1;
19059 h.uns_p[2] = 1;
19060 break;
19062 default:
19063 break;
19066 /* Figure out how many args are present. */
19067 while (num_args > 0 && h.mode[num_args] == VOIDmode)
19068 num_args--;
19070 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
19071 if (!ret_type && h.uns_p[0])
19072 ret_type = builtin_mode_to_type[h.mode[0]][0];
19074 if (!ret_type)
19075 fatal_error (input_location,
19076 "internal error: builtin function %s had an unexpected "
19077 "return type %s", name, GET_MODE_NAME (h.mode[0]));
19079 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
19080 arg_type[i] = NULL_TREE;
19082 for (i = 0; i < num_args; i++)
19084 int m = (int) h.mode[i+1];
19085 int uns_p = h.uns_p[i+1];
19087 arg_type[i] = builtin_mode_to_type[m][uns_p];
19088 if (!arg_type[i] && uns_p)
19089 arg_type[i] = builtin_mode_to_type[m][0];
19091 if (!arg_type[i])
19092 fatal_error (input_location,
19093 "internal error: builtin function %s, argument %d "
19094 "had unexpected argument type %s", name, i,
19095 GET_MODE_NAME (m));
19098 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
19099 if (*found == NULL)
19101 h2 = ggc_alloc<builtin_hash_struct> ();
19102 *h2 = h;
19103 *found = h2;
19105 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
19106 arg_type[2], NULL_TREE);
19109 return (*found)->type;
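/* For example (a sketch of one concrete path through this function),
   ALTIVEC_BUILTIN_VMULEUB arrives as roughly

     builtin_function_type (V8HImode, V16QImode, V16QImode, VOIDmode,
			    ALTIVEC_BUILTIN_VMULEUB,
			    "__builtin_altivec_vmuleub");

   and, because the unsigned two-argument case above sets uns_p[0..2],
   the cached type comes back as
   unsigned V8HI f (unsigned V16QI, unsigned V16QI).  */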
19112 static void
19113 rs6000_common_init_builtins (void)
19115 const struct builtin_description *d;
19116 size_t i;
19118 tree opaque_ftype_opaque = NULL_TREE;
19119 tree opaque_ftype_opaque_opaque = NULL_TREE;
19120 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
19121 tree v2si_ftype = NULL_TREE;
19122 tree v2si_ftype_qi = NULL_TREE;
19123 tree v2si_ftype_v2si_qi = NULL_TREE;
19124 tree v2si_ftype_int_qi = NULL_TREE;
19125 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
19127 if (!TARGET_PAIRED_FLOAT)
19129 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
19130 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
19133 /* Paired and SPE builtins are only available if you build a compiler with
19134 the appropriate options, so only create those builtins with the
19135 appropriate compiler option. Create Altivec and VSX builtins on machines
19136 with at least the general purpose extensions (970 and newer) to allow the
19137 use of the target attribute. */
19139 if (TARGET_EXTRA_BUILTINS)
19140 builtin_mask |= RS6000_BTM_COMMON;
19142 /* Add the ternary operators. */
19143 d = bdesc_3arg;
19144 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
19146 tree type;
19147 HOST_WIDE_INT mask = d->mask;
19149 if ((mask & builtin_mask) != mask)
19151 if (TARGET_DEBUG_BUILTIN)
19152 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
19153 continue;
19156 if (rs6000_overloaded_builtin_p (d->code))
19158 if (! (type = opaque_ftype_opaque_opaque_opaque))
19159 type = opaque_ftype_opaque_opaque_opaque
19160 = build_function_type_list (opaque_V4SI_type_node,
19161 opaque_V4SI_type_node,
19162 opaque_V4SI_type_node,
19163 opaque_V4SI_type_node,
19164 NULL_TREE);
19166 else
19168 enum insn_code icode = d->icode;
19169 if (d->name == 0)
19171 if (TARGET_DEBUG_BUILTIN)
19172 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
19173 (long unsigned)i);
19175 continue;
19178 if (icode == CODE_FOR_nothing)
19180 if (TARGET_DEBUG_BUILTIN)
19181 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
19182 d->name);
19184 continue;
19187 type = builtin_function_type (insn_data[icode].operand[0].mode,
19188 insn_data[icode].operand[1].mode,
19189 insn_data[icode].operand[2].mode,
19190 insn_data[icode].operand[3].mode,
19191 d->code, d->name);
19194 def_builtin (d->name, type, d->code);
19197 /* Add the binary operators. */
19198 d = bdesc_2arg;
19199 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
19201 machine_mode mode0, mode1, mode2;
19202 tree type;
19203 HOST_WIDE_INT mask = d->mask;
19205 if ((mask & builtin_mask) != mask)
19207 if (TARGET_DEBUG_BUILTIN)
19208 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
19209 continue;
19212 if (rs6000_overloaded_builtin_p (d->code))
19214 if (! (type = opaque_ftype_opaque_opaque))
19215 type = opaque_ftype_opaque_opaque
19216 = build_function_type_list (opaque_V4SI_type_node,
19217 opaque_V4SI_type_node,
19218 opaque_V4SI_type_node,
19219 NULL_TREE);
19221 else
19223 enum insn_code icode = d->icode;
19224 if (d->name == 0)
19226 if (TARGET_DEBUG_BUILTIN)
19227 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
19228 (long unsigned)i);
19230 continue;
19233 if (icode == CODE_FOR_nothing)
19235 if (TARGET_DEBUG_BUILTIN)
19236 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
19237 d->name);
19239 continue;
19242 mode0 = insn_data[icode].operand[0].mode;
19243 mode1 = insn_data[icode].operand[1].mode;
19244 mode2 = insn_data[icode].operand[2].mode;
19246 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
19248 if (! (type = v2si_ftype_v2si_qi))
19249 type = v2si_ftype_v2si_qi
19250 = build_function_type_list (opaque_V2SI_type_node,
19251 opaque_V2SI_type_node,
19252 char_type_node,
19253 NULL_TREE);
19256 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
19257 && mode2 == QImode)
19259 if (! (type = v2si_ftype_int_qi))
19260 type = v2si_ftype_int_qi
19261 = build_function_type_list (opaque_V2SI_type_node,
19262 integer_type_node,
19263 char_type_node,
19264 NULL_TREE);
19267 else
19268 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
19269 d->code, d->name);
19272 def_builtin (d->name, type, d->code);
19275 /* Add the simple unary operators. */
19276 d = bdesc_1arg;
19277 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
19279 machine_mode mode0, mode1;
19280 tree type;
19281 HOST_WIDE_INT mask = d->mask;
19283 if ((mask & builtin_mask) != mask)
19285 if (TARGET_DEBUG_BUILTIN)
19286 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
19287 continue;
19290 if (rs6000_overloaded_builtin_p (d->code))
19292 if (! (type = opaque_ftype_opaque))
19293 type = opaque_ftype_opaque
19294 = build_function_type_list (opaque_V4SI_type_node,
19295 opaque_V4SI_type_node,
19296 NULL_TREE);
19298 else
19300 enum insn_code icode = d->icode;
19301 if (d->name == 0)
19303 if (TARGET_DEBUG_BUILTIN)
19304 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
19305 (long unsigned)i);
19307 continue;
19310 if (icode == CODE_FOR_nothing)
19312 if (TARGET_DEBUG_BUILTIN)
19313 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
19314 d->name);
19316 continue;
19319 mode0 = insn_data[icode].operand[0].mode;
19320 mode1 = insn_data[icode].operand[1].mode;
19322 if (mode0 == V2SImode && mode1 == QImode)
19324 if (! (type = v2si_ftype_qi))
19325 type = v2si_ftype_qi
19326 = build_function_type_list (opaque_V2SI_type_node,
19327 char_type_node,
19328 NULL_TREE);
19331 else
19332 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
19333 d->code, d->name);
19336 def_builtin (d->name, type, d->code);
19339 /* Add the simple no-argument operators. */
19340 d = bdesc_0arg;
19341 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
19343 machine_mode mode0;
19344 tree type;
19345 HOST_WIDE_INT mask = d->mask;
19347 if ((mask & builtin_mask) != mask)
19349 if (TARGET_DEBUG_BUILTIN)
19350 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
19351 continue;
19353 if (rs6000_overloaded_builtin_p (d->code))
19355 if (!opaque_ftype_opaque)
19356 opaque_ftype_opaque
19357 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
19358 type = opaque_ftype_opaque;
19360 else
19362 enum insn_code icode = d->icode;
19363 if (d->name == 0)
19365 if (TARGET_DEBUG_BUILTIN)
19366 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
19367 (long unsigned) i);
19368 continue;
19370 if (icode == CODE_FOR_nothing)
19372 if (TARGET_DEBUG_BUILTIN)
19373 fprintf (stderr,
19374 "rs6000_builtin, skip no-argument %s (no code)\n",
19375 d->name);
19376 continue;
19378 mode0 = insn_data[icode].operand[0].mode;
19379 if (mode0 == V2SImode)
19381 /* Code for SPE. */
19382 if (! (type = v2si_ftype))
19384 v2si_ftype
19385 = build_function_type_list (opaque_V2SI_type_node,
19386 NULL_TREE);
19387 type = v2si_ftype;
19390 else
19391 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
19392 d->code, d->name);
19394 def_builtin (d->name, type, d->code);
19398 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
19399 static void
19400 init_float128_ibm (machine_mode mode)
19402 if (!TARGET_XL_COMPAT)
19404 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
19405 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
19406 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
19407 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
19409 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
19411 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
19412 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
19413 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
19414 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
19415 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
19416 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
19417 set_optab_libfunc (le_optab, mode, "__gcc_qle");
19419 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
19420 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
19421 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
19422 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
19423 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
19424 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
19425 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
19426 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
19429 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
19430 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
19432 else
19434 set_optab_libfunc (add_optab, mode, "_xlqadd");
19435 set_optab_libfunc (sub_optab, mode, "_xlqsub");
19436 set_optab_libfunc (smul_optab, mode, "_xlqmul");
19437 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
19440 /* Add various conversions for IFmode to use the traditional TFmode
19441 names. */
19442 if (mode == IFmode)
19444 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
19445 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
19446 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
19447 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
19448 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
19449 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
19451 if (TARGET_POWERPC64)
19453 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
19454 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
19455 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
19456 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
19461 /* Set up IEEE 128-bit floating point routines. Use different names if the
19462 arguments can be passed in a vector register. The historical PowerPC
19463 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
19464 continue to use that if we aren't using vector registers to pass IEEE
19465 128-bit floating point. */
19467 static void
19468 init_float128_ieee (machine_mode mode)
19470 if (FLOAT128_VECTOR_P (mode))
19472 set_optab_libfunc (add_optab, mode, "__addkf3");
19473 set_optab_libfunc (sub_optab, mode, "__subkf3");
19474 set_optab_libfunc (neg_optab, mode, "__negkf2");
19475 set_optab_libfunc (smul_optab, mode, "__mulkf3");
19476 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
19477 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
19478 set_optab_libfunc (abs_optab, mode, "__abskf2");
19480 set_optab_libfunc (eq_optab, mode, "__eqkf2");
19481 set_optab_libfunc (ne_optab, mode, "__nekf2");
19482 set_optab_libfunc (gt_optab, mode, "__gtkf2");
19483 set_optab_libfunc (ge_optab, mode, "__gekf2");
19484 set_optab_libfunc (lt_optab, mode, "__ltkf2");
19485 set_optab_libfunc (le_optab, mode, "__lekf2");
19486 set_optab_libfunc (unord_optab, mode, "__unordkf2");
19488 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
19489 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
19490 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
19491 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
19493 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
19494 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
19495 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
19497 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
19498 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
19499 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
19501 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
19502 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
19503 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
19504 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
19505 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
19506 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
19508 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
19509 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
19510 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
19511 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
19513 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
19514 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
19515 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
19516 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
19518 if (TARGET_POWERPC64)
19520 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
19521 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
19522 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
19523 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
19527 else
19529 set_optab_libfunc (add_optab, mode, "_q_add");
19530 set_optab_libfunc (sub_optab, mode, "_q_sub");
19531 set_optab_libfunc (neg_optab, mode, "_q_neg");
19532 set_optab_libfunc (smul_optab, mode, "_q_mul");
19533 set_optab_libfunc (sdiv_optab, mode, "_q_div");
19534 if (TARGET_PPC_GPOPT)
19535 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
19537 set_optab_libfunc (eq_optab, mode, "_q_feq");
19538 set_optab_libfunc (ne_optab, mode, "_q_fne");
19539 set_optab_libfunc (gt_optab, mode, "_q_fgt");
19540 set_optab_libfunc (ge_optab, mode, "_q_fge");
19541 set_optab_libfunc (lt_optab, mode, "_q_flt");
19542 set_optab_libfunc (le_optab, mode, "_q_fle");
19544 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
19545 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
19546 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
19547 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
19548 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
19549 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
19550 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
19551 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
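 /* Net effect of these registrations: on a VSX target where the IEEE
    128-bit mode is vector-capable, a __float128 addition that cannot be
    expanded inline becomes a call to __addkf3, while the older SVR4
    software-float path calls _q_add instead.  */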
19555 static void
19556 rs6000_init_libfuncs (void)
19558 /* __float128 support. */
19559 if (TARGET_FLOAT128_TYPE)
19561 init_float128_ibm (IFmode);
19562 init_float128_ieee (KFmode);
19565 /* AIX/Darwin/64-bit Linux quad floating point routines. */
19566 if (TARGET_LONG_DOUBLE_128)
19568 if (!TARGET_IEEEQUAD)
19569 init_float128_ibm (TFmode);
19571 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
19572 else
19573 init_float128_ieee (TFmode);
19578 /* Expand a block clear operation, and return 1 if successful. Return 0
19579 if we should let the compiler generate normal code.
19581 operands[0] is the destination
19582 operands[1] is the length
19583 operands[3] is the alignment */
19585 int
19586 expand_block_clear (rtx operands[])
19588 rtx orig_dest = operands[0];
19589 rtx bytes_rtx = operands[1];
19590 rtx align_rtx = operands[3];
19591 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
19592 HOST_WIDE_INT align;
19593 HOST_WIDE_INT bytes;
19594 int offset;
19595 int clear_bytes;
19596 int clear_step;
19598 /* If this is not a fixed size clear, just call memset.  */
19599 if (! constp)
19600 return 0;
19602 /* This must be a fixed size alignment */
19603 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
19604 align = INTVAL (align_rtx) * BITS_PER_UNIT;
19606 /* Anything to clear? */
19607 bytes = INTVAL (bytes_rtx);
19608 if (bytes <= 0)
19609 return 1;
19611 /* Use the builtin memset after a point, to avoid huge code bloat.
19612 When optimize_size, avoid any significant code bloat; calling
19613 memset is about 4 instructions, so allow for one instruction to
19614 load zero and three to do clearing. */
19615 if (TARGET_ALTIVEC && align >= 128)
19616 clear_step = 16;
19617 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
19618 clear_step = 8;
19619 else if (TARGET_SPE && align >= 64)
19620 clear_step = 8;
19621 else
19622 clear_step = 4;
19624 if (optimize_size && bytes > 3 * clear_step)
19625 return 0;
19626 if (! optimize_size && bytes > 8 * clear_step)
19627 return 0;
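 /* For example, with TARGET_ALTIVEC and 128-bit alignment clear_step is
    16, so a clear is expanded inline only up to 48 bytes at -Os and up
    to 128 bytes otherwise; anything larger falls back to memset.  */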
19629 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
19631 machine_mode mode = BLKmode;
19632 rtx dest;
19634 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
19636 clear_bytes = 16;
19637 mode = V4SImode;
19639 else if (bytes >= 8 && TARGET_SPE && align >= 64)
19641 clear_bytes = 8;
19642 mode = V2SImode;
19644 else if (bytes >= 8 && TARGET_POWERPC64
19645 && (align >= 64 || !STRICT_ALIGNMENT))
19647 clear_bytes = 8;
19648 mode = DImode;
19649 if (offset == 0 && align < 64)
19651 rtx addr;
19653 /* If the address form is reg+offset with offset not a
19654 multiple of four, reload into reg indirect form here
19655 rather than waiting for reload. This way we get one
19656 reload, not one per store. */
19657 addr = XEXP (orig_dest, 0);
19658 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
19659 && GET_CODE (XEXP (addr, 1)) == CONST_INT
19660 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
19662 addr = copy_addr_to_reg (addr);
19663 orig_dest = replace_equiv_address (orig_dest, addr);
19667 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
19668 { /* clear 4 bytes */
19669 clear_bytes = 4;
19670 mode = SImode;
19672 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
19673 { /* clear 2 bytes */
19674 clear_bytes = 2;
19675 mode = HImode;
19677 else /* clear 1 byte at a time */
19679 clear_bytes = 1;
19680 mode = QImode;
19683 dest = adjust_address (orig_dest, mode, offset);
19685 emit_move_insn (dest, CONST0_RTX (mode));
19688 return 1;
19691 /* Emit a potentially record-form instruction, setting DST from SRC.
19692 If DOT is 0, that is all; otherwise, set CCREG to the result of the
19693 signed comparison of DST with zero. If DOT is 1, the generated RTL
19694 doesn't care about the DST result; if DOT is 2, it does. If CCREG
19695 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
19696 a separate COMPARE. */
19698 static void
19699 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
19701 if (dot == 0)
19703 emit_move_insn (dst, src);
19704 return;
19707 if (cc_reg_not_cr0_operand (ccreg, CCmode))
19709 emit_move_insn (dst, src);
19710 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
19711 return;
19714 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
19715 if (dot == 1)
19717 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
19718 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
19720 else
19722 rtx set = gen_rtx_SET (dst, src);
19723 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
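 /* For dot == 2 the emitted RTL has the shape
      (parallel [(set ccreg (compare:CC src (const_int 0)))
                 (set dst src)])
    which recog can match as a single record-form instruction such as
    and. or add., setting CR0 as a side effect of the arithmetic.  */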
19727 /* Figure out the correct instructions to generate to load data for
19728 block compare. MODE is used for the read from memory, and
19729 data is zero extended if REG is wider than MODE. If LE code
19730 is being generated, bswap loads are used.
19732 REG is the destination register to move the data into.
19733 MEM is the memory block being read.
19734 MODE is the mode of memory to use for the read. */
19735 static void
19736 do_load_for_compare (rtx reg, rtx mem, machine_mode mode)
19738 switch (GET_MODE (reg))
19740 case DImode:
19741 switch (mode)
19743 case QImode:
19744 emit_insn (gen_zero_extendqidi2 (reg, mem));
19745 break;
19746 case HImode:
19748 rtx src = mem;
19749 if (!BYTES_BIG_ENDIAN)
19751 src = gen_reg_rtx (HImode);
19752 emit_insn (gen_bswaphi2 (src, mem));
19754 emit_insn (gen_zero_extendhidi2 (reg, src));
19755 break;
19757 case SImode:
19759 rtx src = mem;
19760 if (!BYTES_BIG_ENDIAN)
19762 src = gen_reg_rtx (SImode);
19763 emit_insn (gen_bswapsi2 (src, mem));
19765 emit_insn (gen_zero_extendsidi2 (reg, src));
19767 break;
19768 case DImode:
19769 if (!BYTES_BIG_ENDIAN)
19770 emit_insn (gen_bswapdi2 (reg, mem));
19771 else
19772 emit_insn (gen_movdi (reg, mem));
19773 break;
19774 default:
19775 gcc_unreachable ();
19777 break;
19779 case SImode:
19780 switch (mode)
19782 case QImode:
19783 emit_insn (gen_zero_extendqisi2 (reg, mem));
19784 break;
19785 case HImode:
19787 rtx src = mem;
19788 if (!BYTES_BIG_ENDIAN)
19790 src = gen_reg_rtx (HImode);
19791 emit_insn (gen_bswaphi2 (src, mem));
19793 emit_insn (gen_zero_extendhisi2 (reg, src));
19794 break;
19796 case SImode:
19797 if (!BYTES_BIG_ENDIAN)
19798 emit_insn (gen_bswapsi2 (reg, mem));
19799 else
19800 emit_insn (gen_movsi (reg, mem));
19801 break;
19802 case DImode:
19803 /* DImode is larger than the destination reg so is not expected. */
19804 gcc_unreachable ();
19805 break;
19806 default:
19807 gcc_unreachable ();
19809 break;
19810 default:
19811 gcc_unreachable ();
19812 break;
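 /* As an example, loading an HImode chunk into a DImode reg on a
    little-endian target emits a byte-reversing load (lhbrx) into a
    fresh HImode temp and then a zero extension, so the register always
    holds the bytes in memory (big-endian) order for the compare.  */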
19816 /* Select the mode to be used for reading the next chunk of bytes
19817 in the compare.
19819 OFFSET is the current read offset from the beginning of the block.
19820 BYTES is the number of bytes remaining to be read.
19821 ALIGN is the minimum alignment of the memory blocks being compared in bytes.
19822 WORD_MODE_OK indicates using WORD_MODE is allowed, else SImode is
19823 the largest allowable mode. */
19824 static machine_mode
19825 select_block_compare_mode (unsigned HOST_WIDE_INT offset,
19826 unsigned HOST_WIDE_INT bytes,
19827 unsigned HOST_WIDE_INT align, bool word_mode_ok)
19829 /* First see if we can do a whole load unit
19830 as that will be more efficient than a larger load + shift. */
19832 /* If big, use biggest chunk.
19833 If exactly chunk size, use that size.
19834 If remainder can be done in one piece with shifting, do that.
19835 Do largest chunk possible without violating alignment rules. */
19837 /* The most we can read without potential page crossing. */
19838 unsigned HOST_WIDE_INT maxread = ROUND_UP (bytes, align);
19840 if (word_mode_ok && bytes >= UNITS_PER_WORD)
19841 return word_mode;
19842 else if (bytes == GET_MODE_SIZE (SImode))
19843 return SImode;
19844 else if (bytes == GET_MODE_SIZE (HImode))
19845 return HImode;
19846 else if (bytes == GET_MODE_SIZE (QImode))
19847 return QImode;
19848 else if (bytes < GET_MODE_SIZE (SImode)
19849 && offset >= GET_MODE_SIZE (SImode) - bytes)
19850 /* This matches the case where we have SImode and 3 bytes
19851 and offset >= 1 and permits us to move back one and overlap
19852 with the previous read, thus avoiding having to shift
19853 unwanted bytes off of the input. */
19854 return SImode;
19855 else if (word_mode_ok && bytes < UNITS_PER_WORD
19856 && offset >= UNITS_PER_WORD-bytes)
19857 /* Similarly, if we can use DImode it will get matched here and
19858 can do an overlapping read that ends at the end of the block. */
19859 return word_mode;
19860 else if (word_mode_ok && maxread >= UNITS_PER_WORD)
19861 /* It is safe to do all remaining in one load of largest size,
19862 possibly with a shift to get rid of unwanted bytes. */
19863 return word_mode;
19864 else if (maxread >= GET_MODE_SIZE (SImode))
19865 /* It is safe to do all remaining in one SImode load,
19866 possibly with a shift to get rid of unwanted bytes. */
19867 return SImode;
19868 else if (bytes > GET_MODE_SIZE (SImode))
19869 return SImode;
19870 else if (bytes > GET_MODE_SIZE (HImode))
19871 return HImode;
19873 /* Final fallback: do one byte at a time.  */
19874 return QImode;
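 /* Worked example on a 64-bit target (UNITS_PER_WORD == 8): for an
    18-byte block this returns DImode at offsets 0 and 8 and HImode for
    the final 2 bytes, giving the two word loads plus one halfword load
    of the 18-byte example in expand_block_compare below.  */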
19877 /* Compute the alignment of pointer+OFFSET where the original alignment
19878 of pointer was BASE_ALIGN. */
19879 static unsigned HOST_WIDE_INT
19880 compute_current_alignment (unsigned HOST_WIDE_INT base_align,
19881 unsigned HOST_WIDE_INT offset)
19883 if (offset == 0)
19884 return base_align;
19885 return min (base_align, offset & -offset);
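 /* offset & -offset isolates the lowest set bit of the offset, so for
    example base_align 8 with offset 4 yields min (8, 4) = 4, and with
    offset 6 yields min (8, 2) = 2.  */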
19888 /* Expand a block compare operation, and return true if successful.
19889 Return false if we should let the compiler generate normal code,
19890 probably a memcmp call.
19892 OPERANDS[0] is the target (result).
19893 OPERANDS[1] is the first source.
19894 OPERANDS[2] is the second source.
19895 OPERANDS[3] is the length.
19896 OPERANDS[4] is the alignment. */
19897 bool
19898 expand_block_compare (rtx operands[])
19900 rtx target = operands[0];
19901 rtx orig_src1 = operands[1];
19902 rtx orig_src2 = operands[2];
19903 rtx bytes_rtx = operands[3];
19904 rtx align_rtx = operands[4];
19905 HOST_WIDE_INT cmp_bytes = 0;
19906 rtx src1 = orig_src1;
19907 rtx src2 = orig_src2;
19909 /* This case is complicated to handle because the subtract
19910 with carry instructions do not generate the 64-bit
19911 carry and so we must emit code to calculate it ourselves.
19912 We choose not to implement this yet. */
19913 if (TARGET_32BIT && TARGET_POWERPC64)
19914 return false;
19916 /* If this is not a fixed size compare, just call memcmp. */
19917 if (!CONST_INT_P (bytes_rtx))
19918 return false;
19920 /* This must be a fixed size alignment. */
19921 if (!CONST_INT_P (align_rtx))
19922 return false;
19924 unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
19926 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
19927 if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
19928 || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
19929 return false;
19931 gcc_assert (GET_MODE (target) == SImode);
19933 /* Anything to compare? */
19934 unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
19935 if (bytes == 0)
19936 return true;
19938 /* The code generated for p7 and older is not faster than glibc
19939 memcmp if alignment is small and length is not short, so bail
19940 out to avoid those conditions. */
19941 if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
19942 && ((base_align == 1 && bytes > 16)
19943 || (base_align == 2 && bytes > 32)))
19944 return false;
19946 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
19947 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
19948 /* P7/P8 code uses cond for subfc., but P9 uses
19949 it for cmpld, which needs CCUNSmode. */
19950 rtx cond;
19951 if (TARGET_P9_MISC)
19952 cond = gen_reg_rtx (CCUNSmode);
19953 else
19954 cond = gen_reg_rtx (CCmode);
19956 /* If we have an LE target without ldbrx and word_mode is DImode,
19957 then we must avoid using word_mode. */
19958 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
19959 && word_mode == DImode);
19961 /* Strategy phase. How many ops will this take and should we expand it? */
19963 unsigned HOST_WIDE_INT offset = 0;
19964 machine_mode load_mode =
19965 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
19966 unsigned int load_mode_size = GET_MODE_SIZE (load_mode);
19968 /* We don't want to generate too much code. */
19969 unsigned HOST_WIDE_INT max_bytes =
19970 load_mode_size * (unsigned HOST_WIDE_INT) rs6000_block_compare_inline_limit;
19971 if (!IN_RANGE (bytes, 1, max_bytes))
19972 return false;
19974 bool generate_6432_conversion = false;
19975 rtx convert_label = NULL;
19976 rtx final_label = NULL;
19978 /* Example of code generated for an 18-byte compare with 1-byte alignment.
19979 Compiled with -fno-reorder-blocks for clarity.
19980 ldbrx 10,31,8
19981 ldbrx 9,7,8
19982 subfc. 9,9,10
19983 bne 0,.L6487
19984 addi 9,12,8
19985 addi 5,11,8
19986 ldbrx 10,0,9
19987 ldbrx 9,0,5
19988 subfc. 9,9,10
19989 bne 0,.L6487
19990 addi 9,12,16
19991 lhbrx 10,0,9
19992 addi 9,11,16
19993 lhbrx 9,0,9
19994 subf 9,9,10
19995 b .L6488
19996 .p2align 4,,15
19997 .L6487: #convert_label
19998 popcntd 9,9
19999 subfe 10,10,10
20000 or 9,9,10
20001 .L6488: #final_label
20002 extsw 10,9
20004 We start off with DImode for two blocks that jump to the DI->SI conversion
20005 if the difference is found there, then a final block of HImode that skips
20006 the DI->SI conversion. */
20008 while (bytes > 0)
20010 unsigned int align = compute_current_alignment (base_align, offset);
20011 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20012 load_mode = select_block_compare_mode (offset, bytes, align,
20013 word_mode_ok);
20014 else
20015 load_mode = select_block_compare_mode (0, bytes, align, word_mode_ok);
20016 load_mode_size = GET_MODE_SIZE (load_mode);
20017 if (bytes >= load_mode_size)
20018 cmp_bytes = load_mode_size;
20019 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20021 /* Move this load back so it doesn't go past the end.
20022 P8/P9 can do this efficiently. */
20023 unsigned int extra_bytes = load_mode_size - bytes;
20024 cmp_bytes = bytes;
20025 if (extra_bytes < offset)
20027 offset -= extra_bytes;
20028 cmp_bytes = load_mode_size;
20029 bytes = cmp_bytes;
20032 else
20033 /* P7 and earlier can't do the overlapping load trick fast,
20034 so this forces a non-overlapping load and a shift to get
20035 rid of the extra bytes. */
20036 cmp_bytes = bytes;
20038 src1 = adjust_address (orig_src1, load_mode, offset);
20039 src2 = adjust_address (orig_src2, load_mode, offset);
20041 if (!REG_P (XEXP (src1, 0)))
20043 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20044 src1 = replace_equiv_address (src1, src1_reg);
20046 set_mem_size (src1, cmp_bytes);
20048 if (!REG_P (XEXP (src2, 0)))
20050 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20051 src2 = replace_equiv_address (src2, src2_reg);
20053 set_mem_size (src2, cmp_bytes);
20055 do_load_for_compare (tmp_reg_src1, src1, load_mode);
20056 do_load_for_compare (tmp_reg_src2, src2, load_mode);
20058 if (cmp_bytes < load_mode_size)
20060 /* Shift unneeded bytes off. */
20061 rtx sh = GEN_INT (BITS_PER_UNIT * (load_mode_size - cmp_bytes));
20062 if (word_mode == DImode)
20064 emit_insn (gen_lshrdi3 (tmp_reg_src1, tmp_reg_src1, sh));
20065 emit_insn (gen_lshrdi3 (tmp_reg_src2, tmp_reg_src2, sh));
20067 else
20069 emit_insn (gen_lshrsi3 (tmp_reg_src1, tmp_reg_src1, sh));
20070 emit_insn (gen_lshrsi3 (tmp_reg_src2, tmp_reg_src2, sh));
20074 int remain = bytes - cmp_bytes;
20075 if (GET_MODE_SIZE (GET_MODE (target)) > GET_MODE_SIZE (load_mode))
20077 /* Target is larger than load size so we don't need to
20078 reduce result size. */
20080 /* We previously did a block that needed 64->32 conversion but
20081 the current block does not, so a label is needed to jump
20082 to the end. */
20083 if (generate_6432_conversion && !final_label)
20084 final_label = gen_label_rtx ();
20086 if (remain > 0)
20088 /* This is not the last block, branch to the end if the result
20089 of this subtract is not zero. */
20090 if (!final_label)
20091 final_label = gen_label_rtx ();
20092 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20093 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
20094 rtx cr = gen_reg_rtx (CCmode);
20095 rs6000_emit_dot_insn (tmp_reg_src2, tmp, 2, cr);
20096 emit_insn (gen_movsi (target,
20097 gen_lowpart (SImode, tmp_reg_src2)));
20098 rtx ne_rtx = gen_rtx_NE (VOIDmode, cr, const0_rtx);
20099 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
20100 fin_ref, pc_rtx);
20101 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20102 JUMP_LABEL (j) = final_label;
20103 LABEL_NUSES (final_label) += 1;
20105 else
20107 if (word_mode == DImode)
20109 emit_insn (gen_subdi3 (tmp_reg_src2, tmp_reg_src1,
20110 tmp_reg_src2));
20111 emit_insn (gen_movsi (target,
20112 gen_lowpart (SImode, tmp_reg_src2)));
20114 else
20115 emit_insn (gen_subsi3 (target, tmp_reg_src1, tmp_reg_src2));
20117 if (final_label)
20119 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20120 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20121 JUMP_LABEL (j) = final_label;
20122 LABEL_NUSES (final_label) += 1;
20123 emit_barrier ();
20127 else
20129 /* Do we need a 64->32 conversion block? We need the 64->32
20130 conversion even if target size == load_mode size because
20131 the subtract generates one extra bit. */
20132 generate_6432_conversion = true;
20134 if (remain > 0)
20136 if (!convert_label)
20137 convert_label = gen_label_rtx ();
20139 /* Compare to zero and branch to convert_label if not zero. */
20140 rtx cvt_ref = gen_rtx_LABEL_REF (VOIDmode, convert_label);
20141 if (TARGET_P9_MISC)
20143 /* Generate a compare, and convert with a setb later. */
20144 rtx cmp = gen_rtx_COMPARE (CCUNSmode, tmp_reg_src1,
20145 tmp_reg_src2);
20146 emit_insn (gen_rtx_SET (cond, cmp));
20148 else
20149 /* Generate a subfc. and use the longer
20150 sequence for conversion. */
20151 if (TARGET_64BIT)
20152 emit_insn (gen_subfdi3_carry_dot2 (tmp_reg_src2, tmp_reg_src2,
20153 tmp_reg_src1, cond));
20154 else
20155 emit_insn (gen_subfsi3_carry_dot2 (tmp_reg_src2, tmp_reg_src2,
20156 tmp_reg_src1, cond));
20157 rtx ne_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20158 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
20159 cvt_ref, pc_rtx);
20160 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20161 JUMP_LABEL (j) = convert_label;
20162 LABEL_NUSES (convert_label) += 1;
20164 else
20166 /* Just do the subtract/compare. Since this is the last block
20167 the convert code will be generated immediately following. */
20168 if (TARGET_P9_MISC)
20170 rtx cmp = gen_rtx_COMPARE (CCUNSmode, tmp_reg_src1,
20171 tmp_reg_src2);
20172 emit_insn (gen_rtx_SET (cond, cmp));
20174 else
20175 if (TARGET_64BIT)
20176 emit_insn (gen_subfdi3_carry (tmp_reg_src2, tmp_reg_src2,
20177 tmp_reg_src1));
20178 else
20179 emit_insn (gen_subfsi3_carry (tmp_reg_src2, tmp_reg_src2,
20180 tmp_reg_src1));
20184 offset += cmp_bytes;
20185 bytes -= cmp_bytes;
20188 if (generate_6432_conversion)
20190 if (convert_label)
20191 emit_label (convert_label);
20193 /* We need to produce DI result from sub, then convert to target SI
20194 while maintaining <0 / ==0 / >0 properties. This sequence works:
20195 subfc L,A,B
20196 subfe H,H,H
20197 popcntd L,L
20198 rldimi L,H,6,0
20200 This is an alternate one Segher cooked up if somebody
20201 wants to expand this for something that doesn't have popcntd:
20202 subfc L,a,b
20203 subfe H,x,x
20204 addic t,L,-1
20205 subfe v,t,L
20206 or z,v,H
20208 And finally, p9 can just do this:
20209 cmpld A,B
20210 setb r */
20212 if (TARGET_P9_MISC)
20214 emit_insn (gen_setb_unsigned (target, cond));
20216 else
20218 if (TARGET_64BIT)
20220 rtx tmp_reg_ca = gen_reg_rtx (DImode);
20221 emit_insn (gen_subfdi3_carry_in_xx (tmp_reg_ca));
20222 emit_insn (gen_popcntddi2 (tmp_reg_src2, tmp_reg_src2));
20223 emit_insn (gen_iordi3 (tmp_reg_src2, tmp_reg_src2, tmp_reg_ca));
20224 emit_insn (gen_movsi (target, gen_lowpart (SImode, tmp_reg_src2)));
20226 else
20228 rtx tmp_reg_ca = gen_reg_rtx (SImode);
20229 emit_insn (gen_subfsi3_carry_in_xx (tmp_reg_ca));
20230 emit_insn (gen_popcntdsi2 (tmp_reg_src2, tmp_reg_src2));
20231 emit_insn (gen_iorsi3 (target, tmp_reg_src2, tmp_reg_ca));
20236 if (final_label)
20237 emit_label (final_label);
20239 gcc_assert (bytes == 0);
20240 return true;
20243 /* Generate alignment check and branch code to set up for
20244 strncmp when we don't have DI alignment.
20245 STRNCMP_LABEL is the label to branch if there is a page crossing.
20246 SRC is the string pointer to be examined.
20247 BYTES is the max number of bytes to compare. */
20248 static void
20249 expand_strncmp_align_check (rtx strncmp_label, rtx src, HOST_WIDE_INT bytes)
20251 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, strncmp_label);
20252 rtx src_check = copy_addr_to_reg (XEXP (src, 0));
20253 if (GET_MODE (src_check) == SImode)
20254 emit_insn (gen_andsi3 (src_check, src_check, GEN_INT (0xfff)));
20255 else
20256 emit_insn (gen_anddi3 (src_check, src_check, GEN_INT (0xfff)));
20257 rtx cond = gen_reg_rtx (CCmode);
20258 emit_move_insn (cond, gen_rtx_COMPARE (CCmode, src_check,
20259 GEN_INT (4096 - bytes)));
20261 rtx cmp_rtx = gen_rtx_LT (VOIDmode, cond, const0_rtx);
20263 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
20264 pc_rtx, lab_ref);
20265 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20266 JUMP_LABEL (j) = strncmp_label;
20267 LABEL_NUSES (strncmp_label) += 1;
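 /* This mirrors the glibc power8 sequence quoted below: the branch to
    strncmp_label is taken when (addr & 0xfff) >= 4096 - bytes, i.e.
    when reading BYTES bytes from this address might run into the next
    4K page.  */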
20270 /* Expand a string compare operation with length, and return
20271 true if successful. Return false if we should let the
20272 compiler generate normal code, probably a strncmp call.
20274 OPERANDS[0] is the target (result).
20275 OPERANDS[1] is the first source.
20276 OPERANDS[2] is the second source.
20277 If NO_LENGTH is zero, then:
20278 OPERANDS[3] is the length.
20279 OPERANDS[4] is the alignment in bytes.
20280 If NO_LENGTH is nonzero, then:
20281 OPERANDS[3] is the alignment in bytes. */
20282 bool
20283 expand_strn_compare (rtx operands[], int no_length)
20285 rtx target = operands[0];
20286 rtx orig_src1 = operands[1];
20287 rtx orig_src2 = operands[2];
20288 rtx bytes_rtx, align_rtx;
20289 if (no_length)
20291 bytes_rtx = NULL;
20292 align_rtx = operands[3];
20294 else
20296 bytes_rtx = operands[3];
20297 align_rtx = operands[4];
20299 unsigned HOST_WIDE_INT cmp_bytes = 0;
20300 rtx src1 = orig_src1;
20301 rtx src2 = orig_src2;
20303 /* If we have a length, it must be constant. This simplifies things
20304 a bit as we don't have to generate code to check if we've exceeded
20305 the length. This could later be extended to handle a variable length. */
20306 if (!no_length && !CONST_INT_P (bytes_rtx))
20307 return false;
20309 /* This must be a fixed size alignment. */
20310 if (!CONST_INT_P (align_rtx))
20311 return false;
20313 unsigned int base_align = UINTVAL (align_rtx);
20314 int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
20315 int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
20317 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
20318 if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
20319 || SLOW_UNALIGNED_ACCESS (word_mode, align2))
20320 return false;
20322 gcc_assert (GET_MODE (target) == SImode);
20324 /* If we have an LE target without ldbrx and word_mode is DImode,
20325 then we must avoid using word_mode. */
20326 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
20327 && word_mode == DImode);
20329 unsigned int word_mode_size = GET_MODE_SIZE (word_mode);
20331 unsigned HOST_WIDE_INT offset = 0;
20332 unsigned HOST_WIDE_INT bytes; /* N from the strncmp args if available. */
20333 unsigned HOST_WIDE_INT compare_length; /* How much to compare inline. */
20334 if (no_length)
20335 /* Use this as a stand-in to determine the mode to use. */
20336 bytes = rs6000_string_compare_inline_limit * word_mode_size;
20337 else
20338 bytes = UINTVAL (bytes_rtx);
20340 machine_mode load_mode =
20341 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
20342 unsigned int load_mode_size = GET_MODE_SIZE (load_mode);
20343 compare_length = rs6000_string_compare_inline_limit * load_mode_size;
20345 /* If we have equality at the end of the last compare and we have not
20346 found the end of the string, we need to call strcmp/strncmp to
20347 compare the remainder. */
20348 bool equality_compare_rest = false;
20350 if (no_length)
20352 bytes = compare_length;
20353 equality_compare_rest = true;
20355 else
20357 if (bytes <= compare_length)
20358 compare_length = bytes;
20359 else
20360 equality_compare_rest = true;
20363 rtx result_reg = gen_reg_rtx (word_mode);
20364 rtx final_move_label = gen_label_rtx ();
20365 rtx final_label = gen_label_rtx ();
20366 rtx begin_compare_label = NULL;
20368 if (base_align < 8)
20370 /* Generate code that checks distance to 4k boundary for this case. */
20371 begin_compare_label = gen_label_rtx ();
20372 rtx strncmp_label = gen_label_rtx ();
20373 rtx jmp;
20375 /* Strncmp for power8 in glibc does this:
20376 rldicl r8,r3,0,52
20377 cmpldi cr7,r8,4096-16
20378 bgt cr7,L(pagecross) */
20380 /* Make sure that the length we use for the alignment test and
20381 the subsequent code generation are in agreement so we do not
20382 go past the length we tested for a 4k boundary crossing. */
20383 unsigned HOST_WIDE_INT align_test = compare_length;
20384 if (align_test < 8)
20386 align_test = HOST_WIDE_INT_1U << ceil_log2 (align_test);
20387 base_align = align_test;
20389 else
20391 align_test = ROUND_UP (align_test, 8);
20392 base_align = 8;
20395 if (align1 < 8)
20396 expand_strncmp_align_check (strncmp_label, src1, align_test);
20397 if (align2 < 8)
20398 expand_strncmp_align_check (strncmp_label, src2, align_test);
20400 /* Now generate the following sequence:
20401 - branch to begin_compare
20402 - strncmp_label
20403 - call to strncmp
20404 - branch to final_label
20405 - begin_compare_label */
20407 rtx cmp_ref = gen_rtx_LABEL_REF (VOIDmode, begin_compare_label);
20408 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, cmp_ref));
20409 JUMP_LABEL (jmp) = begin_compare_label;
20410 LABEL_NUSES (begin_compare_label) += 1;
20411 emit_barrier ();
20413 emit_label (strncmp_label);
20415 if (!REG_P (XEXP (src1, 0)))
20417 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20418 src1 = replace_equiv_address (src1, src1_reg);
20421 if (!REG_P (XEXP (src2, 0)))
20423 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20424 src2 = replace_equiv_address (src2, src2_reg);
20427 if (no_length)
20429 tree fun = builtin_decl_explicit (BUILT_IN_STRCMP);
20430 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20431 target, LCT_NORMAL, GET_MODE (target), 2,
20432 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20433 force_reg (Pmode, XEXP (src2, 0)), Pmode);
20435 else
20437 /* -m32 -mpowerpc64 results in word_mode being DImode even
20438 though otherwise it is 32-bit. The length arg to strncmp
20439 is a size_t which will be the same size as pointers. */
20440 rtx len_rtx;
20441 if (TARGET_64BIT)
20442 len_rtx = gen_reg_rtx (DImode);
20443 else
20444 len_rtx = gen_reg_rtx (SImode);
20446 emit_move_insn (len_rtx, bytes_rtx);
20448 tree fun = builtin_decl_explicit (BUILT_IN_STRNCMP);
20449 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20450 target, LCT_NORMAL, GET_MODE (target), 3,
20451 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20452 force_reg (Pmode, XEXP (src2, 0)), Pmode,
20453 len_rtx, GET_MODE (len_rtx));
20456 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20457 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20458 JUMP_LABEL (jmp) = final_label;
20459 LABEL_NUSES (final_label) += 1;
20460 emit_barrier ();
20461 emit_label (begin_compare_label);
20464 rtx cleanup_label = NULL;
20465 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
20466 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
20468 /* Generate sequence of ld/ldbrx, cmpb to compare out
20469 to the length specified. */
20470 unsigned HOST_WIDE_INT bytes_to_compare = compare_length;
20471 while (bytes_to_compare > 0)
20473 /* Compare sequence:
20474 check each 8B with: ld/ld cmpd bne
20475 If equal, use rldicr/cmpb to check for zero byte.
20476 cleanup code at end:
20477 cmpb get byte that differs
20478 cmpb look for zero byte
20479 orc combine
20480 cntlzd get bit of first zero/diff byte
20481 subfic convert for rldcl use
20482 rldcl rldcl extract diff/zero byte
20483 subf subtract for final result
20485 The last compare can branch around the cleanup code if the
20486 result is zero because the strings are exactly equal. */
20487 unsigned int align = compute_current_alignment (base_align, offset);
20488 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20489 load_mode = select_block_compare_mode (offset, bytes_to_compare, align,
20490 word_mode_ok);
20491 else
20492 load_mode = select_block_compare_mode (0, bytes_to_compare, align,
20493 word_mode_ok);
20494 load_mode_size = GET_MODE_SIZE (load_mode);
20495 if (bytes_to_compare >= load_mode_size)
20496 cmp_bytes = load_mode_size;
20497 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20499 /* Move this load back so it doesn't go past the end.
20500 P8/P9 can do this efficiently. */
20501 unsigned int extra_bytes = load_mode_size - bytes_to_compare;
20502 cmp_bytes = bytes_to_compare;
20503 if (extra_bytes < offset)
20505 offset -= extra_bytes;
20506 cmp_bytes = load_mode_size;
20507 bytes_to_compare = cmp_bytes;
20510 else
20511 /* P7 and earlier can't do the overlapping load trick fast,
20512 so this forces a non-overlapping load and a shift to get
20513 rid of the extra bytes. */
20514 cmp_bytes = bytes_to_compare;
20516 src1 = adjust_address (orig_src1, load_mode, offset);
20517 src2 = adjust_address (orig_src2, load_mode, offset);
20519 if (!REG_P (XEXP (src1, 0)))
20521 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20522 src1 = replace_equiv_address (src1, src1_reg);
20524 set_mem_size (src1, cmp_bytes);
20526 if (!REG_P (XEXP (src2, 0)))
20528 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20529 src2 = replace_equiv_address (src2, src2_reg);
20531 set_mem_size (src2, cmp_bytes);
20533 do_load_for_compare (tmp_reg_src1, src1, load_mode);
20534 do_load_for_compare (tmp_reg_src2, src2, load_mode);
20536 /* We must always left-align the data we read, and
20537 clear any bytes to the right that are beyond the string.
20538 Otherwise the cmpb sequence won't produce the correct
20539 results. The beginning of the compare will be done
20540 with word_mode so it will not need any extra shifts or
20541 clear-right operations. */
20543 if (load_mode_size < word_mode_size)
20545 /* Rotate left first. */
20546 rtx sh = GEN_INT (BITS_PER_UNIT * (word_mode_size - load_mode_size));
20547 if (word_mode == DImode)
20549 emit_insn (gen_rotldi3 (tmp_reg_src1, tmp_reg_src1, sh));
20550 emit_insn (gen_rotldi3 (tmp_reg_src2, tmp_reg_src2, sh));
20552 else
20554 emit_insn (gen_rotlsi3 (tmp_reg_src1, tmp_reg_src1, sh));
20555 emit_insn (gen_rotlsi3 (tmp_reg_src2, tmp_reg_src2, sh));
20559 if (cmp_bytes < word_mode_size)
20561 /* Now clear right. This plus the rotate can be
20562 turned into a rldicr instruction. */
20563 HOST_WIDE_INT mb = BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20564 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20565 if (word_mode == DImode)
20567 emit_insn (gen_anddi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
20568 emit_insn (gen_anddi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
20570 else
20572 emit_insn (gen_andsi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
20573 emit_insn (gen_andsi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
20577 /* Cases to handle. A and B are chunks of the two strings.
20578 1: Not end of comparison:
20579 A != B: branch to cleanup code to compute result.
20580 A == B: check for 0 byte, next block if not found.
20581 2: End of the inline comparison:
20582 A != B: branch to cleanup code to compute result.
20583 A == B: check for 0 byte, call strcmp/strncmp
20584 3: Compared the requested N bytes:
20585 A == B: branch to result 0.
20586 A != B: cleanup code to compute result. */
20588 unsigned HOST_WIDE_INT remain = bytes_to_compare - cmp_bytes;
20590 rtx dst_label;
20591 if (remain > 0 || equality_compare_rest)
20593 /* Branch to cleanup code, otherwise fall through to do
20594 more compares. */
20595 if (!cleanup_label)
20596 cleanup_label = gen_label_rtx ();
20597 dst_label = cleanup_label;
20599 else
20600 /* Branch to end and produce result of 0. */
20601 dst_label = final_move_label;
20603 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, dst_label);
20604 rtx cond = gen_reg_rtx (CCmode);
20606 /* Always produce the 0 result; it is needed if
20607 cmpb finds a 0 byte in this chunk. */
20608 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
20609 rs6000_emit_dot_insn (result_reg, tmp, 1, cond);
20611 rtx cmp_rtx;
20612 if (remain == 0 && !equality_compare_rest)
20613 cmp_rtx = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
20614 else
20615 cmp_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20617 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
20618 lab_ref, pc_rtx);
20619 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20620 JUMP_LABEL (j) = dst_label;
20621 LABEL_NUSES (dst_label) += 1;
20623 if (remain > 0 || equality_compare_rest)
20625 /* Generate a cmpb to test for a 0 byte and branch
20626 to final result if found. */
20627 rtx cmpb_zero = gen_reg_rtx (word_mode);
20628 rtx lab_ref_fin = gen_rtx_LABEL_REF (VOIDmode, final_move_label);
20629 rtx condz = gen_reg_rtx (CCmode);
20630 rtx zero_reg = gen_reg_rtx (word_mode);
20631 if (word_mode == SImode)
20633 emit_insn (gen_movsi (zero_reg, GEN_INT (0)));
20634 emit_insn (gen_cmpbsi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20635 if (cmp_bytes < word_mode_size)
20637 /* Don't want to look at zero bytes past end. */
20638 HOST_WIDE_INT mb =
20639 BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20640 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20641 emit_insn (gen_andsi3_mask (cmpb_zero, cmpb_zero, mask));
20644 else
20646 emit_insn (gen_movdi (zero_reg, GEN_INT (0)));
20647 emit_insn (gen_cmpbdi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20648 if (cmp_bytes < word_mode_size)
20650 /* Don't want to look at zero bytes past end. */
20651 HOST_WIDE_INT mb =
20652 BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20653 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20654 emit_insn (gen_anddi3_mask (cmpb_zero, cmpb_zero, mask));
20658 emit_move_insn (condz, gen_rtx_COMPARE (CCmode, cmpb_zero, zero_reg));
20659 rtx cmpnz_rtx = gen_rtx_NE (VOIDmode, condz, const0_rtx);
20660 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmpnz_rtx,
20661 lab_ref_fin, pc_rtx);
20662 rtx j2 = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20663 JUMP_LABEL (j2) = final_move_label;
20664 LABEL_NUSES (final_move_label) += 1;
20668 offset += cmp_bytes;
20669 bytes_to_compare -= cmp_bytes;
20672 if (equality_compare_rest)
20674 /* Update pointers past what has been compared already. */
20675 src1 = adjust_address (orig_src1, load_mode, offset);
20676 src2 = adjust_address (orig_src2, load_mode, offset);
20678 if (!REG_P (XEXP (src1, 0)))
20680 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20681 src1 = replace_equiv_address (src1, src1_reg);
20683 set_mem_size (src1, cmp_bytes);
20685 if (!REG_P (XEXP (src2, 0)))
20687 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20688 src2 = replace_equiv_address (src2, src2_reg);
20690 set_mem_size (src2, cmp_bytes);
20692 /* Construct call to strcmp/strncmp to compare the rest of the string. */
20693 if (no_length)
20695 tree fun = builtin_decl_explicit (BUILT_IN_STRCMP);
20696 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20697 target, LCT_NORMAL, GET_MODE (target), 2,
20698 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20699 force_reg (Pmode, XEXP (src2, 0)), Pmode);
20701 else
20703 rtx len_rtx;
20704 if (TARGET_64BIT)
20705 len_rtx = gen_reg_rtx (DImode);
20706 else
20707 len_rtx = gen_reg_rtx (SImode);
20709 emit_move_insn (len_rtx, GEN_INT (bytes - compare_length));
20710 tree fun = builtin_decl_explicit (BUILT_IN_STRNCMP);
20711 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20712 target, LCT_NORMAL, GET_MODE (target), 3,
20713 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20714 force_reg (Pmode, XEXP (src2, 0)), Pmode,
20715 len_rtx, GET_MODE (len_rtx));
20718 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20719 rtx jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20720 JUMP_LABEL (jmp) = final_label;
20721 LABEL_NUSES (final_label) += 1;
20722 emit_barrier ();
20725 if (cleanup_label)
20726 emit_label (cleanup_label);
20728 /* Generate the final sequence that identifies the differing
20729 byte and generates the final result, taking into account
20730 zero bytes:
20732 cmpb cmpb_result1, src1, src2
20733 cmpb cmpb_result2, src1, zero
20734 orc cmpb_result1, cmpb_result1, cmpb_result2
20735 cntlzd get bit of first zero/diff byte
20736 addi convert for rldcl use
20737 rldcl rldcl extract diff/zero byte
20738 subf subtract for final result
20741 rtx cmpb_diff = gen_reg_rtx (word_mode);
20742 rtx cmpb_zero = gen_reg_rtx (word_mode);
20743 rtx rot_amt = gen_reg_rtx (word_mode);
20744 rtx zero_reg = gen_reg_rtx (word_mode);
20746 rtx rot1_1 = gen_reg_rtx (word_mode);
20747 rtx rot1_2 = gen_reg_rtx (word_mode);
20748 rtx rot2_1 = gen_reg_rtx (word_mode);
20749 rtx rot2_2 = gen_reg_rtx (word_mode);
20751 if (word_mode == SImode)
20753 emit_insn (gen_cmpbsi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
20754 emit_insn (gen_movsi (zero_reg, GEN_INT (0)));
20755 emit_insn (gen_cmpbsi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20756 emit_insn (gen_one_cmplsi2 (cmpb_diff, cmpb_diff));
20757 emit_insn (gen_iorsi3 (cmpb_diff, cmpb_diff, cmpb_zero));
20758 emit_insn (gen_clzsi2 (rot_amt, cmpb_diff));
20759 emit_insn (gen_addsi3 (rot_amt, rot_amt, GEN_INT (8)));
20760 emit_insn (gen_rotlsi3 (rot1_1, tmp_reg_src1,
20761 gen_lowpart (SImode, rot_amt)));
20762 emit_insn (gen_andsi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
20763 emit_insn (gen_rotlsi3 (rot2_1, tmp_reg_src2,
20764 gen_lowpart (SImode, rot_amt)));
20765 emit_insn (gen_andsi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
20766 emit_insn (gen_subsi3 (result_reg, rot1_2, rot2_2));
20768 else
20770 emit_insn (gen_cmpbdi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
20771 emit_insn (gen_movdi (zero_reg, GEN_INT (0)));
20772 emit_insn (gen_cmpbdi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20773 emit_insn (gen_one_cmpldi2 (cmpb_diff, cmpb_diff));
20774 emit_insn (gen_iordi3 (cmpb_diff, cmpb_diff, cmpb_zero));
20775 emit_insn (gen_clzdi2 (rot_amt, cmpb_diff));
20776 emit_insn (gen_adddi3 (rot_amt, rot_amt, GEN_INT (8)));
20777 emit_insn (gen_rotldi3 (rot1_1, tmp_reg_src1,
20778 gen_lowpart (SImode, rot_amt)));
20779 emit_insn (gen_anddi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
20780 emit_insn (gen_rotldi3 (rot2_1, tmp_reg_src2,
20781 gen_lowpart (SImode, rot_amt)));
20782 emit_insn (gen_anddi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
20783 emit_insn (gen_subdi3 (result_reg, rot1_2, rot2_2));
20786 emit_label (final_move_label);
20787 emit_insn (gen_movsi (target,
20788 gen_lowpart (SImode, result_reg)));
20789 emit_label (final_label);
20790 return true;
20793 /* Expand a block move operation, and return 1 if successful. Return 0
20794 if we should let the compiler generate normal code.
20796 operands[0] is the destination
20797 operands[1] is the source
20798 operands[2] is the length
20799 operands[3] is the alignment */
20801 #define MAX_MOVE_REG 4
20803 int
20804 expand_block_move (rtx operands[])
20806 rtx orig_dest = operands[0];
20807 rtx orig_src = operands[1];
20808 rtx bytes_rtx = operands[2];
20809 rtx align_rtx = operands[3];
20810 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
20811 int align;
20812 int bytes;
20813 int offset;
20814 int move_bytes;
20815 rtx stores[MAX_MOVE_REG];
20816 int num_reg = 0;
20818 /* If this is not a fixed size move, just call memcpy */
20819 if (! constp)
20820 return 0;
20822 /* This must be a fixed size alignment */
20823 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
20824 align = INTVAL (align_rtx) * BITS_PER_UNIT;
20826 /* Anything to move? */
20827 bytes = INTVAL (bytes_rtx);
20828 if (bytes <= 0)
20829 return 1;
20831 if (bytes > rs6000_block_move_inline_limit)
20832 return 0;
20834 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
20836 union {
20837 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
20838 rtx (*mov) (rtx, rtx);
20839 } gen_func;
20840 machine_mode mode = BLKmode;
20841 rtx src, dest;
20843 /* Altivec first, since it will be faster than a string move
20844 when it applies, and usually not significantly larger. */
20845 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
20847 move_bytes = 16;
20848 mode = V4SImode;
20849 gen_func.mov = gen_movv4si;
20851 else if (TARGET_SPE && bytes >= 8 && align >= 64)
20853 move_bytes = 8;
20854 mode = V2SImode;
20855 gen_func.mov = gen_movv2si;
20857 else if (TARGET_STRING
20858 && bytes > 24 /* move up to 32 bytes at a time */
20859 && ! fixed_regs[5]
20860 && ! fixed_regs[6]
20861 && ! fixed_regs[7]
20862 && ! fixed_regs[8]
20863 && ! fixed_regs[9]
20864 && ! fixed_regs[10]
20865 && ! fixed_regs[11]
20866 && ! fixed_regs[12])
20868 move_bytes = (bytes > 32) ? 32 : bytes;
20869 gen_func.movmemsi = gen_movmemsi_8reg;
20871 else if (TARGET_STRING
20872 && bytes > 16 /* move up to 24 bytes at a time */
20873 && ! fixed_regs[5]
20874 && ! fixed_regs[6]
20875 && ! fixed_regs[7]
20876 && ! fixed_regs[8]
20877 && ! fixed_regs[9]
20878 && ! fixed_regs[10])
20880 move_bytes = (bytes > 24) ? 24 : bytes;
20881 gen_func.movmemsi = gen_movmemsi_6reg;
20883 else if (TARGET_STRING
20884 && bytes > 8 /* move up to 16 bytes at a time */
20885 && ! fixed_regs[5]
20886 && ! fixed_regs[6]
20887 && ! fixed_regs[7]
20888 && ! fixed_regs[8])
20890 move_bytes = (bytes > 16) ? 16 : bytes;
20891 gen_func.movmemsi = gen_movmemsi_4reg;
20893 else if (bytes >= 8 && TARGET_POWERPC64
20894 && (align >= 64 || !STRICT_ALIGNMENT))
20896 move_bytes = 8;
20897 mode = DImode;
20898 gen_func.mov = gen_movdi;
20899 if (offset == 0 && align < 64)
20901 rtx addr;
20903 /* If the address form is reg+offset with offset not a
20904 multiple of four, reload into reg indirect form here
20905 rather than waiting for reload. This way we get one
20906 reload, not one per load and/or store. */
20907 addr = XEXP (orig_dest, 0);
20908 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20909 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20910 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20912 addr = copy_addr_to_reg (addr);
20913 orig_dest = replace_equiv_address (orig_dest, addr);
20915 addr = XEXP (orig_src, 0);
20916 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20917 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20918 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20920 addr = copy_addr_to_reg (addr);
20921 orig_src = replace_equiv_address (orig_src, addr);
20925 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
20926 { /* move up to 8 bytes at a time */
20927 move_bytes = (bytes > 8) ? 8 : bytes;
20928 gen_func.movmemsi = gen_movmemsi_2reg;
20930 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
20931 { /* move 4 bytes */
20932 move_bytes = 4;
20933 mode = SImode;
20934 gen_func.mov = gen_movsi;
20936 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
20937 { /* move 2 bytes */
20938 move_bytes = 2;
20939 mode = HImode;
20940 gen_func.mov = gen_movhi;
20942 else if (TARGET_STRING && bytes > 1)
20943 { /* move up to 4 bytes at a time */
20944 move_bytes = (bytes > 4) ? 4 : bytes;
20945 gen_func.movmemsi = gen_movmemsi_1reg;
20947 else /* move 1 byte at a time */
20949 move_bytes = 1;
20950 mode = QImode;
20951 gen_func.mov = gen_movqi;
20954 src = adjust_address (orig_src, mode, offset);
20955 dest = adjust_address (orig_dest, mode, offset);
20957 if (mode != BLKmode)
20959 rtx tmp_reg = gen_reg_rtx (mode);
20961 emit_insn ((*gen_func.mov) (tmp_reg, src));
20962 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
20965 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
20967 int i;
20968 for (i = 0; i < num_reg; i++)
20969 emit_insn (stores[i]);
20970 num_reg = 0;
20973 if (mode == BLKmode)
20975 /* Move the address into scratch registers. The movmemsi
20976 patterns require zero offset. */
20977 if (!REG_P (XEXP (src, 0)))
20979 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
20980 src = replace_equiv_address (src, src_reg);
20982 set_mem_size (src, move_bytes);
20984 if (!REG_P (XEXP (dest, 0)))
20986 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
20987 dest = replace_equiv_address (dest, dest_reg);
20989 set_mem_size (dest, move_bytes);
20991 emit_insn ((*gen_func.movmemsi) (dest, src,
20992 GEN_INT (move_bytes & 31),
20993 align_rtx));
20997 return 1;
21001 /* Return a string to perform a load_multiple operation.
21002 operands[0] is the vector.
21003 operands[1] is the source address.
21004 operands[2] is the first destination register. */
21006 const char *
21007 rs6000_output_load_multiple (rtx operands[3])
21009 /* We have to handle the case where the pseudo used to contain the address
21010 is assigned to one of the output registers. */
21011 int i, j;
21012 int words = XVECLEN (operands[0], 0);
21013 rtx xop[10];
21015 if (XVECLEN (operands[0], 0) == 1)
21016 return "lwz %2,0(%1)";
21018 for (i = 0; i < words; i++)
21019 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
21021 if (i == words-1)
21023 xop[0] = GEN_INT (4 * (words-1));
21024 xop[1] = operands[1];
21025 xop[2] = operands[2];
21026 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
21027 return "";
21029 else if (i == 0)
21031 xop[0] = GEN_INT (4 * (words-1));
21032 xop[1] = operands[1];
21033 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
21034 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
21035 return "";
21037 else
21039 for (j = 0; j < words; j++)
21040 if (j != i)
21042 xop[0] = GEN_INT (j * 4);
21043 xop[1] = operands[1];
21044 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
21045 output_asm_insn ("lwz %2,%0(%1)", xop);
21047 xop[0] = GEN_INT (i * 4);
21048 xop[1] = operands[1];
21049 output_asm_insn ("lwz %1,%0(%1)", xop);
21050 return "";
21054 return "lswi %2,%1,%N0";
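 /* Illustration of the overlap handling with made-up registers: a
    3-word load into r5..r7 whose address lives in r6 (the middle
    destination) cannot use lswi, so the loop above emits
    lwz r5,0(r6) and lwz r7,8(r6) first, and only then clobbers the
    address with lwz r6,4(r6).  */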
21058 /* A validation routine: say whether CODE, a condition code, and MODE
21059 match. The other alternatives either don't make sense or should
21060 never be generated. */
21062 void
21063 validate_condition_mode (enum rtx_code code, machine_mode mode)
21065 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
21066 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
21067 && GET_MODE_CLASS (mode) == MODE_CC);
21069 /* These don't make sense. */
21070 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
21071 || mode != CCUNSmode);
21073 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
21074 || mode == CCUNSmode);
21076 gcc_assert (mode == CCFPmode
21077 || (code != ORDERED && code != UNORDERED
21078 && code != UNEQ && code != LTGT
21079 && code != UNGT && code != UNLT
21080 && code != UNGE && code != UNLE));
21082 /* These should never be generated except for
21083 flag_finite_math_only. */
21084 gcc_assert (mode != CCFPmode
21085 || flag_finite_math_only
21086 || (code != LE && code != GE
21087 && code != UNEQ && code != LTGT
21088 && code != UNGT && code != UNLT));
21090 /* These are invalid; the information is not there. */
21091 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
21095 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
21096 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
21097 not zero, store there the bit offset (counted from the right) where
21098 the single stretch of 1 bits begins; and similarly for B, the bit
21099 offset where it ends. */
21101 bool
21102 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
21104 unsigned HOST_WIDE_INT val = INTVAL (mask);
21105 unsigned HOST_WIDE_INT bit;
21106 int nb, ne;
21107 int n = GET_MODE_PRECISION (mode);
21109 if (mode != DImode && mode != SImode)
21110 return false;
21112 if (INTVAL (mask) >= 0)
21114 bit = val & -val;
21115 ne = exact_log2 (bit);
21116 nb = exact_log2 (val + bit);
21118 else if (val + 1 == 0)
21120 nb = n;
21121 ne = 0;
21123 else if (val & 1)
21125 val = ~val;
21126 bit = val & -val;
21127 nb = exact_log2 (bit);
21128 ne = exact_log2 (val + bit);
21130 else
21132 bit = val & -val;
21133 ne = exact_log2 (bit);
21134 if (val + bit == 0)
21135 nb = n;
21136 else
21137 nb = 0;
21140 nb--;
21142 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
21143 return false;
21145 if (b)
21146 *b = nb;
21147 if (e)
21148 *e = ne;
21150 return true;
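 /* For example, SImode mask 0x00ffff00 is a single stretch of sixteen
    1 bits: bit = 0x100 gives ne = 8, and val + bit = 0x1000000 gives
    nb = 24 - 1 = 23, so *b = 23 and *e = 8.  */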
21153 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
21154 or rldicr instruction, to implement an AND with it in mode MODE. */
21156 bool
21157 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
21159 int nb, ne;
21161 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21162 return false;
21164 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
21165 does not wrap. */
21166 if (mode == DImode)
21167 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
21169 /* For SImode, rlwinm can do everything. */
21170 if (mode == SImode)
21171 return (nb < 32 && ne < 32);
21173 return false;
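 /* So in DImode 0xffffff00 is fine (ne = 8, nb = 31 fits rlwinm), but
    a wrap-around mask such as 0xff000000000000ff needs a rotate and is
    rejected here even though rs6000_is_valid_mask accepts it.  */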
21176 /* Return the instruction template for an AND with mask in mode MODE, with
21177 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21179 const char *
21180 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
21182 int nb, ne;
21184 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
21185 gcc_unreachable ();
21187 if (mode == DImode && ne == 0)
21189 operands[3] = GEN_INT (63 - nb);
21190 if (dot)
21191 return "rldicl. %0,%1,0,%3";
21192 return "rldicl %0,%1,0,%3";
21195 if (mode == DImode && nb == 63)
21197 operands[3] = GEN_INT (63 - ne);
21198 if (dot)
21199 return "rldicr. %0,%1,0,%3";
21200 return "rldicr %0,%1,0,%3";
21203 if (nb < 32 && ne < 32)
21205 operands[3] = GEN_INT (31 - nb);
21206 operands[4] = GEN_INT (31 - ne);
21207 if (dot)
21208 return "rlwinm. %0,%1,0,%3,%4";
21209 return "rlwinm %0,%1,0,%3,%4";
21212 gcc_unreachable ();
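 /* Usage sketch: a DImode AND with 0xfffffffff (36 low bits set, so
    ne == 0 and nb == 35) hits the first case and prints
    "rldicl %0,%1,0,28", i.e. rotate by 0 and clear the top
    63 - 35 = 28 bits.  */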
21215 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
21216 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
21217 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
21219 bool
21220 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
21222 int nb, ne;
21224 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21225 return false;
21227 int n = GET_MODE_PRECISION (mode);
21228 int sh = -1;
21230 if (CONST_INT_P (XEXP (shift, 1)))
21232 sh = INTVAL (XEXP (shift, 1));
21233 if (sh < 0 || sh >= n)
21234 return false;
21237 rtx_code code = GET_CODE (shift);
21239 /* Convert any shift by 0 to a rotate, to simplify the code below. */
21240 if (sh == 0)
21241 code = ROTATE;
21243 /* Convert rotate to simple shift if we can, to make analysis simpler. */
21244 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
21245 code = ASHIFT;
21246 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
21248 code = LSHIFTRT;
21249 sh = n - sh;
21252 /* DImode rotates need rld*. */
21253 if (mode == DImode && code == ROTATE)
21254 return (nb == 63 || ne == 0 || ne == sh);
21256 /* SImode rotates need rlw*. */
21257 if (mode == SImode && code == ROTATE)
21258 return (nb < 32 && ne < 32 && sh < 32);
21260 /* Wrap-around masks are only okay for rotates. */
21261 if (ne > nb)
21262 return false;
21264 /* Variable shifts are only okay for rotates. */
21265 if (sh < 0)
21266 return false;
21268 /* Don't allow ASHIFT if the mask is wrong for that. */
21269 if (code == ASHIFT && ne < sh)
21270 return false;
21272 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
21273 if the mask is wrong for that. */
21274 if (nb < 32 && ne < 32 && sh < 32
21275 && !(code == LSHIFTRT && nb >= 32 - sh))
21276 return true;
21278 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
21279 if the mask is wrong for that. */
21280 if (code == LSHIFTRT)
21281 sh = 64 - sh;
21282 if (nb == 63 || ne == 0 || ne == sh)
21283 return !(code == LSHIFTRT && nb >= sh);
21285 return false;
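/* A sketch of the conversions above (not part of GCC), in DImode: a
   ROTATE left by 8 under mask 0xffffff00 (ne = 8, nb = 31) keeps only
   bits that moved up, so it is analyzed as an ASHIFT by 8; a ROTATE left
   by 56 under mask 0xff (ne = 0, nb = 7) keeps only bits that wrapped
   around, so it is analyzed as an LSHIFTRT by 64 - 56 = 8.  */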
21288 /* Return the instruction template for a shift with mask in mode MODE, with
21289 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21291 const char *
21292 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
21294 int nb, ne;
21296 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
21297 gcc_unreachable ();
21299 if (mode == DImode && ne == 0)
21301 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21302 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
21303 operands[3] = GEN_INT (63 - nb);
21304 if (dot)
21305 return "rld%I2cl. %0,%1,%2,%3";
21306 return "rld%I2cl %0,%1,%2,%3";
21309 if (mode == DImode && nb == 63)
21311 operands[3] = GEN_INT (63 - ne);
21312 if (dot)
21313 return "rld%I2cr. %0,%1,%2,%3";
21314 return "rld%I2cr %0,%1,%2,%3";
21317 if (mode == DImode
21318 && GET_CODE (operands[4]) != LSHIFTRT
21319 && CONST_INT_P (operands[2])
21320 && ne == INTVAL (operands[2]))
21322 operands[3] = GEN_INT (63 - nb);
21323 if (dot)
21324 return "rld%I2c. %0,%1,%2,%3";
21325 return "rld%I2c %0,%1,%2,%3";
21328 if (nb < 32 && ne < 32)
21330 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21331 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
21332 operands[3] = GEN_INT (31 - nb);
21333 operands[4] = GEN_INT (31 - ne);
21334 /* This insn can also be a 64-bit rotate with mask that really makes
21335 it just a shift right (with mask); the %h below are to adjust for
21336 that situation (shift count is >= 32 in that case). */
21337 if (dot)
21338 return "rlw%I2nm. %0,%1,%h2,%3,%4";
21339 return "rlw%I2nm %0,%1,%h2,%3,%4";
21342 gcc_unreachable ();
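/* Worked example of the %h case above (a sketch, not from the sources):
   a DImode rotate left by 56 under mask 0x1ffffc (ne = 2, nb = 20) is
   really a shift right by 8 confined to the low word, so it is emitted
   as "rlwinm %0,%1,24,11,29" -- %h2 reduces the count 56 to
   56 & 31 == 24.  */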
21345 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
21346 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
21347 ASHIFT, or LSHIFTRT) in mode MODE. */
21349 bool
21350 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
21352 int nb, ne;
21354 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21355 return false;
21357 int n = GET_MODE_PRECISION (mode);
21359 int sh = INTVAL (XEXP (shift, 1));
21360 if (sh < 0 || sh >= n)
21361 return false;
21363 rtx_code code = GET_CODE (shift);
21365 /* Convert any shift by 0 to a rotate, to simplify the code below. */
21366 if (sh == 0)
21367 code = ROTATE;
21369 /* Convert rotate to simple shift if we can, to make analysis simpler. */
21370 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
21371 code = ASHIFT;
21372 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
21374 code = LSHIFTRT;
21375 sh = n - sh;
21378 /* DImode rotates need rldimi. */
21379 if (mode == DImode && code == ROTATE)
21380 return (ne == sh);
21382 /* SImode rotates need rlwimi. */
21383 if (mode == SImode && code == ROTATE)
21384 return (nb < 32 && ne < 32 && sh < 32);
21386 /* Wrap-around masks are only okay for rotates. */
21387 if (ne > nb)
21388 return false;
21390 /* Don't allow ASHIFT if the mask is wrong for that. */
21391 if (code == ASHIFT && ne < sh)
21392 return false;
21394 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
21395 if the mask is wrong for that. */
21396 if (nb < 32 && ne < 32 && sh < 32
21397 && !(code == LSHIFTRT && nb >= 32 - sh))
21398 return true;
21400 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
21401 if the mask is wrong for that. */
21402 if (code == LSHIFTRT)
21403 sh = 64 - sh;
21404 if (ne == sh)
21405 return !(code == LSHIFTRT && nb >= sh);
21407 return false;
21410 /* Return the instruction template for an insert with mask in mode MODE, with
21411 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21413 const char *
21414 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
21416 int nb, ne;
21418 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
21419 gcc_unreachable ();
21421 /* Prefer rldimi because rlwimi is cracked. */
21422 if (TARGET_POWERPC64
21423 && (!dot || mode == DImode)
21424 && GET_CODE (operands[4]) != LSHIFTRT
21425 && ne == INTVAL (operands[2]))
21427 operands[3] = GEN_INT (63 - nb);
21428 if (dot)
21429 return "rldimi. %0,%1,%2,%3";
21430 return "rldimi %0,%1,%2,%3";
21433 if (nb < 32 && ne < 32)
21435 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21436 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
21437 operands[3] = GEN_INT (31 - nb);
21438 operands[4] = GEN_INT (31 - ne);
21439 if (dot)
21440 return "rlwimi. %0,%1,%2,%3,%4";
21441 return "rlwimi %0,%1,%2,%3,%4";
21444 gcc_unreachable ();
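/* A sketch (not part of GCC): inserting a 16-bit field at bit 8, i.e.
   mask 0x0000000000ffff00 with shift count 8, has ne == 8 == the shift
   count, so on a 64-bit target it becomes "rldimi %0,%1,8,40"
   (operands[3] = 63 - 23 = 40), and "rlwimi %0,%1,8,8,23" otherwise.  */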
21447 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
21448 using two machine instructions. */
21450 bool
21451 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
21453 /* There are two kinds of AND we can handle with two insns:
21454 1) those we can do with two rl* insns;
21455 2) ori[s];xori[s].
21457 We do not handle that last case yet. */
21459 /* If there is just one stretch of ones, we can do it. */
21460 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
21461 return true;
21463 /* Otherwise, fill in the lowest "hole"; if we can do the result with
21464 one insn, we can do the whole thing with two. */
21465 unsigned HOST_WIDE_INT val = INTVAL (c);
21466 unsigned HOST_WIDE_INT bit1 = val & -val;
21467 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
21468 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
21469 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
21470 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
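/* Worked example of the hole-filling trick above (a sketch): for SImode
   c = 0xff00ff00, bit1 = 0x100, bit2 = 0x10000 (start of the lowest
   hole), and bit3 = 0x1000000 (first one above it), so c + bit3 - bit2
   = 0xffffff00 fills the hole at bits 16..23.  That is a single stretch
   of ones, so the AND can be done as two rl* insns: one with mask
   0xff00ffff (clearing the hole) and one with mask 0xffffff00.  */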
21473 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
21474 If EXPAND is true, split rotate-and-mask instructions we generate to
21475 their constituent parts as well (this is used during expand); if DOT
21476 is 1, make the last insn a record-form instruction clobbering the
21477 destination GPR and setting the CC reg (from operands[3]); if 2, set
21478 that GPR as well as the CC reg. */
21480 void
21481 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
21483 gcc_assert (!(expand && dot));
21485 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
21487 /* If it is one stretch of ones, it is DImode; shift left, mask, then
21488 shift right. This generates better code than doing the masks without
21489 shifts, or shifting first right and then left. */
21490 int nb, ne;
21491 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
21493 gcc_assert (mode == DImode);
21495 int shift = 63 - nb;
21496 if (expand)
21498 rtx tmp1 = gen_reg_rtx (DImode);
21499 rtx tmp2 = gen_reg_rtx (DImode);
21500 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
21501 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
21502 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
21504 else
21506 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
21507 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
21508 emit_move_insn (operands[0], tmp);
21509 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
21510 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21512 return;
21515 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
21516 that does the rest. */
21517 unsigned HOST_WIDE_INT bit1 = val & -val;
21518 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
21519 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
21520 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
21522 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
21523 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
21525 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
21527 /* Two "no-rotate"-and-mask instructions, for SImode. */
21528 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
21530 gcc_assert (mode == SImode);
21532 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
21533 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
21534 emit_move_insn (reg, tmp);
21535 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
21536 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21537 return;
21540 gcc_assert (mode == DImode);
21542 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
21543 insns; we have to do the first in SImode, because it wraps. */
21544 if (mask2 <= 0xffffffff
21545 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
21547 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
21548 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
21549 GEN_INT (mask1));
21550 rtx reg_low = gen_lowpart (SImode, reg);
21551 emit_move_insn (reg_low, tmp);
21552 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
21553 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21554 return;
21557 /* Two rld* insns: rotate, clear the hole in the middle (which now is
21558 at the top end), rotate back and clear the other hole. */
21559 int right = exact_log2 (bit3);
21560 int left = 64 - right;
21562 /* Rotate the mask too. */
21563 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
21565 if (expand)
21567 rtx tmp1 = gen_reg_rtx (DImode);
21568 rtx tmp2 = gen_reg_rtx (DImode);
21569 rtx tmp3 = gen_reg_rtx (DImode);
21570 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
21571 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
21572 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
21573 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
21575 else
21577 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
21578 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
21579 emit_move_insn (operands[0], tmp);
21580 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
21581 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
21582 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
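/* Worked example of the rotate trick above (a sketch): DImode
   c = 0xff00ff0000000000 has its lowest hole at bits 48..55, so bit3 is
   1 << 56, right = 56, and left = 8.  Rotating left by 8 moves the hole
   to the top, where the rotated mask1 = 0x00ffffffffffffff (an rldicl
   mask) clears it; rotating back and masking with
   mask2 = 0xffffff0000000000 (an rldicr mask) clears the low bits.  */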
21586 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
21587 for lfq and stfq insns iff the registers are hard registers. */
21589 int
21590 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
21592 /* We might have been passed a SUBREG. */
21593 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
21594 return 0;
21596 /* We might have been passed non-floating-point registers. */
21597 if (!FP_REGNO_P (REGNO (reg1))
21598 || !FP_REGNO_P (REGNO (reg2)))
21599 return 0;
21601 return (REGNO (reg1) == REGNO (reg2) - 1);
21604 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insns.
21605 addr1 and addr2 must be in consecutive memory locations
21606 (addr2 == addr1 + 8). */
21608 int
21609 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
21611 rtx addr1, addr2;
21612 unsigned int reg1, reg2;
21613 int offset1, offset2;
21615 /* The mems cannot be volatile. */
21616 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
21617 return 0;
21619 addr1 = XEXP (mem1, 0);
21620 addr2 = XEXP (mem2, 0);
21622 /* Extract an offset (if used) from the first addr. */
21623 if (GET_CODE (addr1) == PLUS)
21625 /* If not a REG, return zero. */
21626 if (GET_CODE (XEXP (addr1, 0)) != REG)
21627 return 0;
21628 else
21630 reg1 = REGNO (XEXP (addr1, 0));
21631 /* The offset must be constant! */
21632 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
21633 return 0;
21634 offset1 = INTVAL (XEXP (addr1, 1));
21637 else if (GET_CODE (addr1) != REG)
21638 return 0;
21639 else
21641 reg1 = REGNO (addr1);
21642 /* This was a simple (mem (reg)) expression. Offset is 0. */
21643 offset1 = 0;
21646 /* And now for the second addr. */
21647 if (GET_CODE (addr2) == PLUS)
21649 /* If not a REG, return zero. */
21650 if (GET_CODE (XEXP (addr2, 0)) != REG)
21651 return 0;
21652 else
21654 reg2 = REGNO (XEXP (addr2, 0));
21655 /* The offset must be constant. */
21656 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
21657 return 0;
21658 offset2 = INTVAL (XEXP (addr2, 1));
21661 else if (GET_CODE (addr2) != REG)
21662 return 0;
21663 else
21665 reg2 = REGNO (addr2);
21666 /* This was a simple (mem (reg)) expression. Offset is 0. */
21667 offset2 = 0;
21670 /* Both of these must have the same base register. */
21671 if (reg1 != reg2)
21672 return 0;
21674 /* The offset for the second addr must be 8 more than the first addr. */
21675 if (offset2 != offset1 + 8)
21676 return 0;
21678 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
21679 instructions. */
21680 return 1;
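/* Example of an accepted pair (a sketch): (mem (plus (reg 9) (const_int
   16))) and (mem (plus (reg 9) (const_int 24))) share base register r9
   and their offsets differ by exactly 8, so the peephole may combine the
   two accesses into a single lfq or stfq.  */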
21684 rtx
21685 rs6000_secondary_memory_needed_rtx (machine_mode mode)
21687 static bool eliminated = false;
21688 rtx ret;
21690 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
21691 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
21692 else
21694 rtx mem = cfun->machine->sdmode_stack_slot;
21695 gcc_assert (mem != NULL_RTX);
21697 if (!eliminated)
21699 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
21700 cfun->machine->sdmode_stack_slot = mem;
21701 eliminated = true;
21703 ret = mem;
21706 if (TARGET_DEBUG_ADDR)
21708 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
21709 GET_MODE_NAME (mode));
21710 if (!ret)
21711 fprintf (stderr, "\tNULL_RTX\n");
21712 else
21713 debug_rtx (ret);
21716 return ret;
21719 /* Return the mode to be used for memory when a secondary memory
21720 location is needed. For SDmode values we need to use DDmode, in
21721 all other cases we can use the same mode. */
21722 machine_mode
21723 rs6000_secondary_memory_needed_mode (machine_mode mode)
21725 if (lra_in_progress && mode == SDmode)
21726 return DDmode;
21727 return mode;
21730 static tree
21731 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
21733 /* Don't walk into types. */
21734 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
21736 *walk_subtrees = 0;
21737 return NULL_TREE;
21740 switch (TREE_CODE (*tp))
21742 case VAR_DECL:
21743 case PARM_DECL:
21744 case FIELD_DECL:
21745 case RESULT_DECL:
21746 case SSA_NAME:
21747 case REAL_CST:
21748 case MEM_REF:
21749 case VIEW_CONVERT_EXPR:
21750 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
21751 return *tp;
21752 break;
21753 default:
21754 break;
21757 return NULL_TREE;
21760 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
21761 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
21762 only work on the traditional altivec registers, note if an altivec register
21763 was chosen. */
21765 static enum rs6000_reg_type
21766 register_to_reg_type (rtx reg, bool *is_altivec)
21768 HOST_WIDE_INT regno;
21769 enum reg_class rclass;
21771 if (GET_CODE (reg) == SUBREG)
21772 reg = SUBREG_REG (reg);
21774 if (!REG_P (reg))
21775 return NO_REG_TYPE;
21777 regno = REGNO (reg);
21778 if (regno >= FIRST_PSEUDO_REGISTER)
21780 if (!lra_in_progress && !reload_in_progress && !reload_completed)
21781 return PSEUDO_REG_TYPE;
21783 regno = true_regnum (reg);
21784 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
21785 return PSEUDO_REG_TYPE;
21788 gcc_assert (regno >= 0);
21790 if (is_altivec && ALTIVEC_REGNO_P (regno))
21791 *is_altivec = true;
21793 rclass = rs6000_regno_regclass[regno];
21794 return reg_class_to_reg_type[(int)rclass];
21797 /* Helper function to return the cost of adding a TOC entry address. */
21799 static inline int
21800 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
21802 int ret;
21804 if (TARGET_CMODEL != CMODEL_SMALL)
21805 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
21807 else
21808 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
21810 return ret;
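/* Restating the cases above (a sketch): for medium and large code models
   the cost is 1 when the register class lacks offset addressing and 2
   when it has it; for -mcmodel=small it is 6 under -mminimal-toc and 3
   otherwise.  */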
21813 /* Helper function for rs6000_secondary_reload to determine whether the memory
21814 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
21815 needs reloading. Return negative if the memory is not handled by the memory
21816 helper functions and to try a different reload method, 0 if no additional
21817 instructions are needed, and positive to give the extra cost for the
21818 memory. */
21820 static int
21821 rs6000_secondary_reload_memory (rtx addr,
21822 enum reg_class rclass,
21823 machine_mode mode)
21825 int extra_cost = 0;
21826 rtx reg, and_arg, plus_arg0, plus_arg1;
21827 addr_mask_type addr_mask;
21828 const char *type = NULL;
21829 const char *fail_msg = NULL;
21831 if (GPR_REG_CLASS_P (rclass))
21832 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
21834 else if (rclass == FLOAT_REGS)
21835 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
21837 else if (rclass == ALTIVEC_REGS)
21838 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
21840 /* For the combined VSX_REGS, turn off Altivec AND -16. */
21841 else if (rclass == VSX_REGS)
21842 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
21843 & ~RELOAD_REG_AND_M16);
21845 /* If the register allocator hasn't made up its mind yet on the register
21846 class to use, settle on defaults to use. */
21847 else if (rclass == NO_REGS)
21849 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
21850 & ~RELOAD_REG_AND_M16);
21852 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
21853 addr_mask &= ~(RELOAD_REG_INDEXED
21854 | RELOAD_REG_PRE_INCDEC
21855 | RELOAD_REG_PRE_MODIFY);
21858 else
21859 addr_mask = 0;
21861 /* If the register isn't valid in this register class, just return now. */
21862 if ((addr_mask & RELOAD_REG_VALID) == 0)
21864 if (TARGET_DEBUG_ADDR)
21866 fprintf (stderr,
21867 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
21868 "not valid in class\n",
21869 GET_MODE_NAME (mode), reg_class_names[rclass]);
21870 debug_rtx (addr);
21873 return -1;
21876 switch (GET_CODE (addr))
21878 /* Does the register class support auto update forms for this mode? We
21879 don't need a scratch register, since the powerpc only supports
21880 PRE_INC, PRE_DEC, and PRE_MODIFY. */
21881 case PRE_INC:
21882 case PRE_DEC:
21883 reg = XEXP (addr, 0);
21884 if (!base_reg_operand (reg, GET_MODE (reg)))
21886 fail_msg = "no base register #1";
21887 extra_cost = -1;
21890 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
21892 extra_cost = 1;
21893 type = "update";
21895 break;
21897 case PRE_MODIFY:
21898 reg = XEXP (addr, 0);
21899 plus_arg1 = XEXP (addr, 1);
21900 if (!base_reg_operand (reg, GET_MODE (reg))
21901 || GET_CODE (plus_arg1) != PLUS
21902 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
21904 fail_msg = "bad PRE_MODIFY";
21905 extra_cost = -1;
21908 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
21910 extra_cost = 1;
21911 type = "update";
21913 break;
21915 /* Do we need to simulate AND -16 to clear the bottom address bits used
21916 in VMX load/stores? Only allow the AND for vector sizes. */
21917 case AND:
21918 and_arg = XEXP (addr, 0);
21919 if (GET_MODE_SIZE (mode) != 16
21920 || GET_CODE (XEXP (addr, 1)) != CONST_INT
21921 || INTVAL (XEXP (addr, 1)) != -16)
21923 fail_msg = "bad Altivec AND #1";
21924 extra_cost = -1;
21927 if (rclass != ALTIVEC_REGS)
21929 if (legitimate_indirect_address_p (and_arg, false))
21930 extra_cost = 1;
21932 else if (legitimate_indexed_address_p (and_arg, false))
21933 extra_cost = 2;
21935 else
21937 fail_msg = "bad Altivec AND #2";
21938 extra_cost = -1;
21941 type = "and";
21943 break;
21945 /* If this is an indirect address, make sure it is a base register. */
21946 case REG:
21947 case SUBREG:
21948 if (!legitimate_indirect_address_p (addr, false))
21950 extra_cost = 1;
21951 type = "move";
21953 break;
21955 /* If this is an indexed address, make sure the register class can handle
21956 indexed addresses for this mode. */
21957 case PLUS:
21958 plus_arg0 = XEXP (addr, 0);
21959 plus_arg1 = XEXP (addr, 1);
21961 /* (plus (plus (reg) (constant)) (constant)) is generated during
21962 push_reload processing, so handle it now. */
21963 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
21965 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21967 extra_cost = 1;
21968 type = "offset";
21972 /* (plus (plus (reg) (constant)) (reg)) is also generated during
21973 push_reload processing, so handle it now. */
21974 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
21976 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
21978 extra_cost = 1;
21979 type = "indexed #2";
21983 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
21985 fail_msg = "no base register #2";
21986 extra_cost = -1;
21989 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
21991 if ((addr_mask & RELOAD_REG_INDEXED) == 0
21992 || !legitimate_indexed_address_p (addr, false))
21994 extra_cost = 1;
21995 type = "indexed";
21999 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
22000 && CONST_INT_P (plus_arg1))
22002 if (!quad_address_offset_p (INTVAL (plus_arg1)))
22004 extra_cost = 1;
22005 type = "vector d-form offset";
22009 /* Make sure the register class can handle offset addresses. */
22010 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
22012 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22014 extra_cost = 1;
22015 type = "offset #2";
22019 else
22021 fail_msg = "bad PLUS";
22022 extra_cost = -1;
22025 break;
22027 case LO_SUM:
22028 /* Quad offsets are restricted and can't handle normal addresses. */
22029 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22031 extra_cost = -1;
22032 type = "vector d-form lo_sum";
22035 else if (!legitimate_lo_sum_address_p (mode, addr, false))
22037 fail_msg = "bad LO_SUM";
22038 extra_cost = -1;
22041 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22043 extra_cost = 1;
22044 type = "lo_sum";
22046 break;
22048 /* Static addresses need to create a TOC entry. */
22049 case CONST:
22050 case SYMBOL_REF:
22051 case LABEL_REF:
22052 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22054 extra_cost = -1;
22055 type = "vector d-form lo_sum #2";
22058 else
22060 type = "address";
22061 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
22063 break;
22065 /* TOC references look like offsetable memory. */
22066 case UNSPEC:
22067 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
22069 fail_msg = "bad UNSPEC";
22070 extra_cost = -1;
22073 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22075 extra_cost = -1;
22076 type = "vector d-form lo_sum #3";
22079 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22081 extra_cost = 1;
22082 type = "toc reference";
22084 break;
22086 default:
22088 fail_msg = "bad address";
22089 extra_cost = -1;
22093 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
22095 if (extra_cost < 0)
22096 fprintf (stderr,
22097 "rs6000_secondary_reload_memory error: mode = %s, "
22098 "class = %s, addr_mask = '%s', %s\n",
22099 GET_MODE_NAME (mode),
22100 reg_class_names[rclass],
22101 rs6000_debug_addr_mask (addr_mask, false),
22102 (fail_msg != NULL) ? fail_msg : "<bad address>");
22104 else
22105 fprintf (stderr,
22106 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
22107 "addr_mask = '%s', extra cost = %d, %s\n",
22108 GET_MODE_NAME (mode),
22109 reg_class_names[rclass],
22110 rs6000_debug_addr_mask (addr_mask, false),
22111 extra_cost,
22112 (type) ? type : "<none>");
22114 debug_rtx (addr);
22117 return extra_cost;
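/* Example classifications (a sketch): a 16-byte load from (and (reg 3)
   (const_int -16)) reloaded into FLOAT_REGS costs 1 extra insn to
   materialize the masked address; (plus (reg 1) (const_int 64)) in a
   class without offset addressing costs 1 ("offset #2"); an address the
   helpers cannot handle at all returns -1, and the caller falls back to
   another reload strategy.  */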
22120 /* Helper function for rs6000_secondary_reload to return true if a move to a
22121 different register class is really a simple move. */
22123 static bool
22124 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
22125 enum rs6000_reg_type from_type,
22126 machine_mode mode)
22128 int size = GET_MODE_SIZE (mode);
22130 /* Add support for various direct moves available. In this function, we only
22131 look at cases where we don't need any extra registers, and one or more
22132 simple move insns are issued. Originally, small integers were not allowed
22133 in FPR/VSX registers. Single-precision binary floating point is not a simple
22134 move because we need to convert to the single-precision memory layout.
22135 The 4-byte SDmode can be moved. TDmode values are disallowed since they
22136 need special direct move handling, which we do not support yet. */
22137 if (TARGET_DIRECT_MOVE
22138 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22139 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
22141 if (TARGET_POWERPC64)
22143 /* ISA 2.07: MTVSRD or MFVSRD. */
22144 if (size == 8)
22145 return true;
22147 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
22148 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
22149 return true;
22152 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
22153 if (TARGET_VSX_SMALL_INTEGER)
22155 if (mode == SImode)
22156 return true;
22158 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
22159 return true;
22162 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
22163 if (mode == SDmode)
22164 return true;
22167 /* Power6+: MFTGPR or MFFGPR. */
22168 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
22169 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
22170 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
22171 return true;
22173 /* Move to/from SPR. */
22174 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
22175 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
22176 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
22177 return true;
22179 return false;
22182 /* Direct move helper function for rs6000_secondary_reload; handle all of the
22183 special direct moves that involve allocating an extra register. Return
22184 true if there is such a helper, storing its insn code and extra cost in
22185 SRI; otherwise return false. */
22187 static bool
22188 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
22189 enum rs6000_reg_type from_type,
22190 machine_mode mode,
22191 secondary_reload_info *sri,
22192 bool altivec_p)
22194 bool ret = false;
22195 enum insn_code icode = CODE_FOR_nothing;
22196 int cost = 0;
22197 int size = GET_MODE_SIZE (mode);
22199 if (TARGET_POWERPC64 && size == 16)
22201 /* Handle moving 128-bit values from GPRs to VSX registers on
22202 ISA 2.07 (power8, power9) when running in 64-bit mode using
22203 XXPERMDI to glue the two 64-bit values back together. */
22204 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
22206 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
22207 icode = reg_addr[mode].reload_vsx_gpr;
22210 /* Handle moving 128-bit values from VSX registers to GPRs on
22211 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
22212 bottom 64-bit value. */
22213 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22215 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
22216 icode = reg_addr[mode].reload_gpr_vsx;
22220 else if (TARGET_POWERPC64 && mode == SFmode)
22222 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22224 cost = 3; /* xscvdpspn, mfvsrd, and. */
22225 icode = reg_addr[mode].reload_gpr_vsx;
22228 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
22230 cost = 2; /* mtvsrz, xscvspdpn. */
22231 icode = reg_addr[mode].reload_vsx_gpr;
22235 else if (!TARGET_POWERPC64 && size == 8)
22237 /* Handle moving 64-bit values from GPRs to floating point registers on
22238 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
22239 32-bit values back together. Altivec register classes must be handled
22240 specially since a different instruction is used, and the secondary
22241 reload support requires a single instruction class in the scratch
22242 register constraint. However, right now TFmode is not allowed in
22243 Altivec registers, so the pattern will never match. */
22244 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
22246 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
22247 icode = reg_addr[mode].reload_fpr_gpr;
22251 if (icode != CODE_FOR_nothing)
22253 ret = true;
22254 if (sri)
22256 sri->icode = icode;
22257 sri->extra_cost = cost;
22261 return ret;
22264 /* Return whether a move between two register classes can be done either
22265 directly (simple move) or via a pattern that uses a single extra temporary
22266 (using ISA 2.07's direct move in this case). */
22268 static bool
22269 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
22270 enum rs6000_reg_type from_type,
22271 machine_mode mode,
22272 secondary_reload_info *sri,
22273 bool altivec_p)
22275 /* Fall back to load/store reloads if either type is not a register. */
22276 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
22277 return false;
22279 /* If we haven't allocated registers yet, assume the move can be done for the
22280 standard register types. */
22281 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
22282 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
22283 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
22284 return true;
22286 /* Moves to the same set of registers are simple moves for non-specialized
22287 registers. */
22288 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
22289 return true;
22291 /* Check whether a simple move can be done directly. */
22292 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
22294 if (sri)
22296 sri->icode = CODE_FOR_nothing;
22297 sri->extra_cost = 0;
22299 return true;
22302 /* Now check if we can do it in a few steps. */
22303 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
22304 altivec_p);
22307 /* Inform reload about cases where moving X with a mode MODE to a register in
22308 RCLASS requires an extra scratch or immediate register. Return the class
22309 needed for the immediate register.
22311 For VSX and Altivec, we may need a register to convert sp+offset into
22312 reg+sp.
22314 For misaligned 64-bit gpr loads and stores we need a register to
22315 convert an offset address to indirect. */
22317 static reg_class_t
22318 rs6000_secondary_reload (bool in_p,
22319 rtx x,
22320 reg_class_t rclass_i,
22321 machine_mode mode,
22322 secondary_reload_info *sri)
22324 enum reg_class rclass = (enum reg_class) rclass_i;
22325 reg_class_t ret = ALL_REGS;
22326 enum insn_code icode;
22327 bool default_p = false;
22328 bool done_p = false;
22330 /* Allow subreg of memory before/during reload. */
22331 bool memory_p = (MEM_P (x)
22332 || (!reload_completed && GET_CODE (x) == SUBREG
22333 && MEM_P (SUBREG_REG (x))));
22335 sri->icode = CODE_FOR_nothing;
22336 sri->t_icode = CODE_FOR_nothing;
22337 sri->extra_cost = 0;
22338 icode = ((in_p)
22339 ? reg_addr[mode].reload_load
22340 : reg_addr[mode].reload_store);
22342 if (REG_P (x) || register_operand (x, mode))
22344 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
22345 bool altivec_p = (rclass == ALTIVEC_REGS);
22346 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
22348 if (!in_p)
22349 std::swap (to_type, from_type);
22351 /* Can we do a direct move of some sort? */
22352 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
22353 altivec_p))
22355 icode = (enum insn_code)sri->icode;
22356 default_p = false;
22357 done_p = true;
22358 ret = NO_REGS;
22362 /* Make sure 0.0 is not reloaded or forced into memory. */
22363 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
22365 ret = NO_REGS;
22366 default_p = false;
22367 done_p = true;
22370 /* If this is a scalar floating point value and we want to load it into the
22371 traditional Altivec registers, do it via a move via a traditional floating
22372 point register, unless we have D-form addressing. Also make sure that
22373 non-zero constants use a FPR. */
22374 if (!done_p && reg_addr[mode].scalar_in_vmx_p
22375 && !mode_supports_vmx_dform (mode)
22376 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
22377 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
22379 ret = FLOAT_REGS;
22380 default_p = false;
22381 done_p = true;
22384 /* Handle reload of load/stores if we have reload helper functions. */
22385 if (!done_p && icode != CODE_FOR_nothing && memory_p)
22387 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
22388 mode);
22390 if (extra_cost >= 0)
22392 done_p = true;
22393 ret = NO_REGS;
22394 if (extra_cost > 0)
22396 sri->extra_cost = extra_cost;
22397 sri->icode = icode;
22402 /* Handle unaligned loads and stores of integer registers. */
22403 if (!done_p && TARGET_POWERPC64
22404 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
22405 && memory_p
22406 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
22408 rtx addr = XEXP (x, 0);
22409 rtx off = address_offset (addr);
22411 if (off != NULL_RTX)
22413 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
22414 unsigned HOST_WIDE_INT offset = INTVAL (off);
22416 /* We need a secondary reload when our legitimate_address_p
22417 says the address is good (as otherwise the entire address
22418 will be reloaded), and the offset is not a multiple of
22419 four or we have an address wrap. Address wrap will only
22420 occur for LO_SUMs since legitimate_offset_address_p
22421 rejects addresses for 16-byte mems that will wrap. */
22422 if (GET_CODE (addr) == LO_SUM
22423 ? (1 /* legitimate_address_p allows any offset for lo_sum */
22424 && ((offset & 3) != 0
22425 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
22426 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
22427 && (offset & 3) != 0))
22429 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
22430 if (in_p)
22431 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
22432 : CODE_FOR_reload_di_load);
22433 else
22434 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
22435 : CODE_FOR_reload_di_store);
22436 sri->extra_cost = 2;
22437 ret = NO_REGS;
22438 done_p = true;
22440 else
22441 default_p = true;
22443 else
22444 default_p = true;
22447 if (!done_p && !TARGET_POWERPC64
22448 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
22449 && memory_p
22450 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
22452 rtx addr = XEXP (x, 0);
22453 rtx off = address_offset (addr);
22455 if (off != NULL_RTX)
22457 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
22458 unsigned HOST_WIDE_INT offset = INTVAL (off);
22460 /* We need a secondary reload when our legitimate_address_p
22461 says the address is good (as otherwise the entire address
22462 will be reloaded), and we have a wrap.
22464 legitimate_lo_sum_address_p allows LO_SUM addresses to
22465 have any offset so test for wrap in the low 16 bits.
22467 legitimate_offset_address_p checks for the range
22468 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
22469 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
22470 [0x7ff4,0x7fff] respectively, so test for the
22471 intersection of these ranges, [0x7ffc,0x7fff] and
22472 [0x7ff4,0x7ff7] respectively.
22474 Note that the address we see here may have been
22475 manipulated by legitimize_reload_address. */
22476 if (GET_CODE (addr) == LO_SUM
22477 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
22478 : offset - (0x8000 - extra) < UNITS_PER_WORD)
22480 if (in_p)
22481 sri->icode = CODE_FOR_reload_si_load;
22482 else
22483 sri->icode = CODE_FOR_reload_si_store;
22484 sri->extra_cost = 2;
22485 ret = NO_REGS;
22486 done_p = true;
22488 else
22489 default_p = true;
22491 else
22492 default_p = true;
22495 if (!done_p)
22496 default_p = true;
22498 if (default_p)
22499 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
22501 gcc_assert (ret != ALL_REGS);
22503 if (TARGET_DEBUG_ADDR)
22505 fprintf (stderr,
22506 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
22507 "mode = %s",
22508 reg_class_names[ret],
22509 in_p ? "true" : "false",
22510 reg_class_names[rclass],
22511 GET_MODE_NAME (mode));
22513 if (reload_completed)
22514 fputs (", after reload", stderr);
22516 if (!done_p)
22517 fputs (", done_p not set", stderr);
22519 if (default_p)
22520 fputs (", default secondary reload", stderr);
22522 if (sri->icode != CODE_FOR_nothing)
22523 fprintf (stderr, ", reload func = %s, extra cost = %d",
22524 insn_data[sri->icode].name, sri->extra_cost);
22526 else if (sri->extra_cost > 0)
22527 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
22529 fputs ("\n", stderr);
22530 debug_rtx (x);
22533 return ret;
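/* Worked example of the 32-bit wrap test above (a sketch): for a 16-byte
   value, UNITS_PER_WORD is 4 and extra is 12, so offsets in
   [0x7ff4,0x7ff7] pass legitimate_offset_address_p yet place their last
   word at or beyond 0x8000, outside the signed 16-bit displacement.
   E.g. offset 0x7ff4 gives 0x7ff4 - (0x8000 - 12) == 0 < 4, so the
   CODE_FOR_reload_si_load/store helpers force the address to
   indirect.  */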
22536 /* Better tracing for rs6000_secondary_reload_inner. */
22538 static void
22539 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
22540 bool store_p)
22542 rtx set, clobber;
22544 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
22546 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
22547 store_p ? "store" : "load");
22549 if (store_p)
22550 set = gen_rtx_SET (mem, reg);
22551 else
22552 set = gen_rtx_SET (reg, mem);
22554 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
22555 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
22558 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
22559 ATTRIBUTE_NORETURN;
22561 static void
22562 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
22563 bool store_p)
22565 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
22566 gcc_unreachable ();
22569 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
22570 reload helper functions. These were identified in
22571 rs6000_secondary_reload_memory, and if reload decided to use the secondary
22572 reload, it calls the insns:
22573 reload_<RELOAD:mode>_<P:mptrsize>_store
22574 reload_<RELOAD:mode>_<P:mptrsize>_load
22576 which in turn calls this function, to do whatever is necessary to create
22577 valid addresses. */
22579 void
22580 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
22582 int regno = true_regnum (reg);
22583 machine_mode mode = GET_MODE (reg);
22584 addr_mask_type addr_mask;
22585 rtx addr;
22586 rtx new_addr;
22587 rtx op_reg, op0, op1;
22588 rtx and_op;
22589 rtx cc_clobber;
22590 rtvec rv;
22592 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
22593 || !base_reg_operand (scratch, GET_MODE (scratch)))
22594 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22596 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
22597 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
22599 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
22600 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
22602 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
22603 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
22605 else
22606 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22608 /* Make sure the mode is valid in this register class. */
22609 if ((addr_mask & RELOAD_REG_VALID) == 0)
22610 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22612 if (TARGET_DEBUG_ADDR)
22613 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
22615 new_addr = addr = XEXP (mem, 0);
22616 switch (GET_CODE (addr))
22618 /* Does the register class support auto update forms for this mode? If
22619 not, do the update now. We don't need a scratch register, since the
22620 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
22621 case PRE_INC:
22622 case PRE_DEC:
22623 op_reg = XEXP (addr, 0);
22624 if (!base_reg_operand (op_reg, Pmode))
22625 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22627 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
22629 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
22630 new_addr = op_reg;
22632 break;
22634 case PRE_MODIFY:
22635 op0 = XEXP (addr, 0);
22636 op1 = XEXP (addr, 1);
22637 if (!base_reg_operand (op0, Pmode)
22638 || GET_CODE (op1) != PLUS
22639 || !rtx_equal_p (op0, XEXP (op1, 0)))
22640 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22642 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
22644 emit_insn (gen_rtx_SET (op0, op1));
22645 new_addr = op0;
22647 break;
22649 /* Do we need to simulate AND -16 to clear the bottom address bits used
22650 in VMX load/stores? */
22651 case AND:
22652 op0 = XEXP (addr, 0);
22653 op1 = XEXP (addr, 1);
22654 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
22656 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
22657 op_reg = op0;
22659 else if (GET_CODE (op1) == PLUS)
22661 emit_insn (gen_rtx_SET (scratch, op1));
22662 op_reg = scratch;
22665 else
22666 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22668 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
22669 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
22670 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
22671 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
22672 new_addr = scratch;
22674 break;
22676 /* If this is an indirect address, make sure it is a base register. */
22677 case REG:
22678 case SUBREG:
22679 if (!base_reg_operand (addr, GET_MODE (addr)))
22681 emit_insn (gen_rtx_SET (scratch, addr));
22682 new_addr = scratch;
22684 break;
22686 /* If this is an indexed address, make sure the register class can handle
22687 indexed addresses for this mode. */
22688 case PLUS:
22689 op0 = XEXP (addr, 0);
22690 op1 = XEXP (addr, 1);
22691 if (!base_reg_operand (op0, Pmode))
22692 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22694 else if (int_reg_operand (op1, Pmode))
22696 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
22698 emit_insn (gen_rtx_SET (scratch, addr));
22699 new_addr = scratch;
22703 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
22705 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
22706 || !quad_address_p (addr, mode, false))
22708 emit_insn (gen_rtx_SET (scratch, addr));
22709 new_addr = scratch;
22713 /* Make sure the register class can handle offset addresses. */
22714 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
22716 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22718 emit_insn (gen_rtx_SET (scratch, addr));
22719 new_addr = scratch;
22723 else
22724 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22726 break;
22728 case LO_SUM:
22729 op0 = XEXP (addr, 0);
22730 op1 = XEXP (addr, 1);
22731 if (!base_reg_operand (op0, Pmode))
22732 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22734 else if (int_reg_operand (op1, Pmode))
22736 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
22738 emit_insn (gen_rtx_SET (scratch, addr));
22739 new_addr = scratch;
22743 /* Quad offsets are restricted and can't handle normal addresses. */
22744 else if (mode_supports_vsx_dform_quad (mode))
22746 emit_insn (gen_rtx_SET (scratch, addr));
22747 new_addr = scratch;
22750 /* Make sure the register class can handle offset addresses. */
22751 else if (legitimate_lo_sum_address_p (mode, addr, false))
22753 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22755 emit_insn (gen_rtx_SET (scratch, addr));
22756 new_addr = scratch;
22760 else
22761 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22763 break;
22765 case SYMBOL_REF:
22766 case CONST:
22767 case LABEL_REF:
22768 rs6000_emit_move (scratch, addr, Pmode);
22769 new_addr = scratch;
22770 break;
22772 default:
22773 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22776 /* Adjust the address if it changed. */
22777 if (addr != new_addr)
22779 mem = replace_equiv_address_nv (mem, new_addr);
22780 if (TARGET_DEBUG_ADDR)
22781 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
22784 /* Now create the move. */
22785 if (store_p)
22786 emit_insn (gen_rtx_SET (mem, reg));
22787 else
22788 emit_insn (gen_rtx_SET (reg, mem));
22790 return;
22793 /* Convert reloads involving 64-bit gprs and misaligned offset
22794 addressing, or multiple 32-bit gprs and offsets that are too large,
22795 to use indirect addressing. */
22797 void
22798 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
22800 int regno = true_regnum (reg);
22801 enum reg_class rclass;
22802 rtx addr;
22803 rtx scratch_or_premodify = scratch;
22805 if (TARGET_DEBUG_ADDR)
22807 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
22808 store_p ? "store" : "load");
22809 fprintf (stderr, "reg:\n");
22810 debug_rtx (reg);
22811 fprintf (stderr, "mem:\n");
22812 debug_rtx (mem);
22813 fprintf (stderr, "scratch:\n");
22814 debug_rtx (scratch);
22817 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
22818 gcc_assert (GET_CODE (mem) == MEM);
22819 rclass = REGNO_REG_CLASS (regno);
22820 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
22821 addr = XEXP (mem, 0);
22823 if (GET_CODE (addr) == PRE_MODIFY)
22825 gcc_assert (REG_P (XEXP (addr, 0))
22826 && GET_CODE (XEXP (addr, 1)) == PLUS
22827 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
22828 scratch_or_premodify = XEXP (addr, 0);
22829 if (!HARD_REGISTER_P (scratch_or_premodify))
22830 /* If we have a pseudo here then reload will have arranged
22831 to have it replaced, but only in the original insn.
22832 Use the replacement here too. */
22833 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
22835 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
22836 expressions from the original insn, without unsharing them.
22837 Any RTL that points into the original insn will of course
22838 have register replacements applied. That is why we don't
22839 need to look for replacements under the PLUS. */
22840 addr = XEXP (addr, 1);
22842 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
22844 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
22846 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
22848 /* Now create the move. */
22849 if (store_p)
22850 emit_insn (gen_rtx_SET (mem, reg));
22851 else
22852 emit_insn (gen_rtx_SET (reg, mem));
22854 return;
22857 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
22858 this function has any SDmode references. If we are on a power7 or later, we
22859 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
22860 can load/store the value. */
22862 static void
22863 rs6000_alloc_sdmode_stack_slot (void)
22865 tree t;
22866 basic_block bb;
22867 gimple_stmt_iterator gsi;
22869 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
22870 /* We use a different approach for dealing with the secondary
22871 memory in LRA. */
22872 if (ira_use_lra_p)
22873 return;
22875 if (TARGET_NO_SDMODE_STACK)
22876 return;
22878 FOR_EACH_BB_FN (bb, cfun)
22879 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
22881 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
22882 if (ret)
22884 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22885 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22886 SDmode, 0);
22887 return;
22891 /* Check for any SDmode parameters of the function. */
22892 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
22894 if (TREE_TYPE (t) == error_mark_node)
22895 continue;
22897 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
22898 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
22900 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22901 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22902 SDmode, 0);
22903 return;
22908 static void
22909 rs6000_instantiate_decls (void)
22911 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
22912 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
22915 /* Given an rtx X being reloaded into a reg required to be
22916 in class CLASS, return the class of reg to actually use.
22917 In general this is just CLASS; but on some machines
22918 in some cases it is preferable to use a more restrictive class.
22920 On the RS/6000, we have to return NO_REGS when we want to reload a
22921 floating-point CONST_DOUBLE to force it to be copied to memory.
22923 We also don't want to reload integer values into floating-point
22924 registers if we can at all help it. In fact, this can
22925 cause reload to die, if it tries to generate a reload of CTR
22926 into a FP register and discovers it doesn't have the memory location
22927 required.
22929 ??? Would it be a good idea to have reload do the converse, that is
22930 try to reload floating modes into FP registers if possible? */
22933 static enum reg_class
22934 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
22936 machine_mode mode = GET_MODE (x);
22937 bool is_constant = CONSTANT_P (x);
22939 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
22940 reload class for it. */
22941 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22942 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
22943 return NO_REGS;
22945 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
22946 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
22947 return NO_REGS;
22949 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
22950 the reloading of address expressions using PLUS into floating point
22951 registers. */
22952 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
22954 if (is_constant)
22956 /* Zero is always allowed in all VSX registers. */
22957 if (x == CONST0_RTX (mode))
22958 return rclass;
22960 /* If this is a vector constant that can be formed with a few Altivec
22961 instructions, we want altivec registers. */
22962 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
22963 return ALTIVEC_REGS;
22965 /* If this is an integer constant that can easily be loaded into
22966 vector registers, allow it. */
22967 if (CONST_INT_P (x))
22969 HOST_WIDE_INT value = INTVAL (x);
22971 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
22972 2.06 can generate it in the Altivec registers with
22973 VSPLTI<x>. */
22974 if (value == -1)
22976 if (TARGET_P8_VECTOR)
22977 return rclass;
22978 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22979 return ALTIVEC_REGS;
22980 else
22981 return NO_REGS;
22984 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
22985 a sign extend in the Altivec registers. */
22986 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
22987 && TARGET_VSX_SMALL_INTEGER
22988 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
22989 return ALTIVEC_REGS;
22992 /* Force constant to memory. */
22993 return NO_REGS;
22996 /* D-form addressing can easily reload the value. */
22997 if (mode_supports_vmx_dform (mode)
22998 || mode_supports_vsx_dform_quad (mode))
22999 return rclass;
23001 /* If this is a scalar floating point value and we don't have D-form
23002 addressing, prefer the traditional floating point registers so that we
23003 can use D-form (register+offset) addressing. */
23004 if (rclass == VSX_REGS
23005 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
23006 return FLOAT_REGS;
23008 /* Prefer the Altivec registers if Altivec is handling the vector
23009 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
23010 loads. */
23011 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
23012 || mode == V1TImode)
23013 return ALTIVEC_REGS;
23015 return rclass;
23018 if (is_constant || GET_CODE (x) == PLUS)
23020 if (reg_class_subset_p (GENERAL_REGS, rclass))
23021 return GENERAL_REGS;
23022 if (reg_class_subset_p (BASE_REGS, rclass))
23023 return BASE_REGS;
23024 return NO_REGS;
23027 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
23028 return GENERAL_REGS;
23030 return rclass;
23033 /* Debug version of rs6000_preferred_reload_class. */
23034 static enum reg_class
23035 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
23037 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
23039 fprintf (stderr,
23040 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
23041 "mode = %s, x:\n",
23042 reg_class_names[ret], reg_class_names[rclass],
23043 GET_MODE_NAME (GET_MODE (x)));
23044 debug_rtx (x);
23046 return ret;
23049 /* If we are copying between FP or AltiVec registers and anything else, we need
23050 a memory location. The exception is when we are targeting ppc64 and the
23051 direct moves between fpr and gpr are available. Also, under VSX, you
23052 can copy vector registers from the FP register set to the Altivec register
23053 set and vice versa. */
23055 static bool
23056 rs6000_secondary_memory_needed (enum reg_class from_class,
23057 enum reg_class to_class,
23058 machine_mode mode)
23060 enum rs6000_reg_type from_type, to_type;
23061 bool altivec_p = ((from_class == ALTIVEC_REGS)
23062 || (to_class == ALTIVEC_REGS));
23064 /* If a simple/direct move is available, we don't need secondary memory. */
23065 from_type = reg_class_to_reg_type[(int)from_class];
23066 to_type = reg_class_to_reg_type[(int)to_class];
23068 if (rs6000_secondary_reload_move (to_type, from_type, mode,
23069 (secondary_reload_info *)0, altivec_p))
23070 return false;
23072 /* If we have a floating point or vector register class, we need to use
23073 memory to transfer the data. */
23074 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
23075 return true;
23077 return false;
23080 /* Debug version of rs6000_secondary_memory_needed. */
23081 static bool
23082 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
23083 enum reg_class to_class,
23084 machine_mode mode)
23086 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
23088 fprintf (stderr,
23089 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
23090 "to_class = %s, mode = %s\n",
23091 ret ? "true" : "false",
23092 reg_class_names[from_class],
23093 reg_class_names[to_class],
23094 GET_MODE_NAME (mode));
23096 return ret;
23099 /* Return the register class of a scratch register needed to copy IN into
23100 or out of a register in RCLASS in MODE. If it can be done directly,
23101 NO_REGS is returned. */
23103 static enum reg_class
23104 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
23105 rtx in)
23107 int regno;
23109 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
23110 #if TARGET_MACHO
23111 && MACHOPIC_INDIRECT
23112 #endif
23115 /* We cannot copy a symbolic operand directly into anything
23116 other than BASE_REGS for TARGET_ELF. So indicate that a
23117 register from BASE_REGS is needed as an intermediate
23118 register.
23120 On Darwin, pic addresses require a load from memory, which
23121 needs a base register. */
23122 if (rclass != BASE_REGS
23123 && (GET_CODE (in) == SYMBOL_REF
23124 || GET_CODE (in) == HIGH
23125 || GET_CODE (in) == LABEL_REF
23126 || GET_CODE (in) == CONST))
23127 return BASE_REGS;
23130 if (GET_CODE (in) == REG)
23132 regno = REGNO (in);
23133 if (regno >= FIRST_PSEUDO_REGISTER)
23135 regno = true_regnum (in);
23136 if (regno >= FIRST_PSEUDO_REGISTER)
23137 regno = -1;
23140 else if (GET_CODE (in) == SUBREG)
23142 regno = true_regnum (in);
23143 if (regno >= FIRST_PSEUDO_REGISTER)
23144 regno = -1;
23146 else
23147 regno = -1;
23149 /* If we have VSX register moves, prefer moving scalar values between
23150 Altivec registers and GPR by going via an FPR (and then via memory)
23151 instead of reloading the secondary memory address for Altivec moves. */
23152 if (TARGET_VSX
23153 && GET_MODE_SIZE (mode) < 16
23154 && !mode_supports_vmx_dform (mode)
23155 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
23156 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
23157 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
23158 && (regno >= 0 && INT_REGNO_P (regno)))))
23159 return FLOAT_REGS;
23161 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
23162 into anything. */
23163 if (rclass == GENERAL_REGS || rclass == BASE_REGS
23164 || (regno >= 0 && INT_REGNO_P (regno)))
23165 return NO_REGS;
23167 /* Constants, memory, and VSX registers can go into VSX registers (both the
23168 traditional floating point and the altivec registers). */
23169 if (rclass == VSX_REGS
23170 && (regno == -1 || VSX_REGNO_P (regno)))
23171 return NO_REGS;
23173 /* Constants, memory, and FP registers can go into FP registers. */
23174 if ((regno == -1 || FP_REGNO_P (regno))
23175 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
23176 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
23178 /* Memory, and AltiVec registers can go into AltiVec registers. */
23179 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
23180 && rclass == ALTIVEC_REGS)
23181 return NO_REGS;
23183 /* We can copy among the CR registers. */
23184 if ((rclass == CR_REGS || rclass == CR0_REGS)
23185 && regno >= 0 && CR_REGNO_P (regno))
23186 return NO_REGS;
23188 /* Otherwise, we need GENERAL_REGS. */
23189 return GENERAL_REGS;
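
/* A minimal sketch of how the hook behaves (illustrative; the variable
   names below are placeholders, not part of the original source): on an
   ELF target a symbolic address cannot be copied straight into a
   floating-point register, so BASE_REGS comes back as the required
   intermediate class.  */
#if 0
rtx sym = gen_rtx_SYMBOL_REF (Pmode, "x");
enum reg_class scratch
  = rs6000_secondary_reload_class (FLOAT_REGS, Pmode, sym); /* BASE_REGS */
#endif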
23192 /* Debug version of rs6000_secondary_reload_class. */
23193 static enum reg_class
23194 rs6000_debug_secondary_reload_class (enum reg_class rclass,
23195 machine_mode mode, rtx in)
23197 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
23198 fprintf (stderr,
23199 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
23200 "mode = %s, input rtx:\n",
23201 reg_class_names[ret], reg_class_names[rclass],
23202 GET_MODE_NAME (mode));
23203 debug_rtx (in);
23205 return ret;
23208 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
23210 static bool
23211 rs6000_cannot_change_mode_class (machine_mode from,
23212 machine_mode to,
23213 enum reg_class rclass)
23215 unsigned from_size = GET_MODE_SIZE (from);
23216 unsigned to_size = GET_MODE_SIZE (to);
23218 if (from_size != to_size)
23220 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
23222 if (reg_classes_intersect_p (xclass, rclass))
23224 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
23225 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
23226 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
23227 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
23229 /* Don't allow 64-bit types to overlap with 128-bit types that take a
23230 single register under VSX because the scalar part of the register
23231 is in the upper 64-bits, and not the lower 64-bits. Types like
23232 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
23233 IEEE floating point can't overlap, and neither can small
23234 values. */
23236 if (to_float128_vector_p && from_float128_vector_p)
23237 return false;
23239 else if (to_float128_vector_p || from_float128_vector_p)
23240 return true;
23242 /* TDmode in floating-mode registers must always go into a register
23243 pair with the most significant word in the even-numbered register
23244 to match ISA requirements. In little-endian mode, this does not
23245 match subreg numbering, so we cannot allow subregs. */
23246 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
23247 return true;
23249 if (from_size < 8 || to_size < 8)
23250 return true;
23252 if (from_size == 8 && (8 * to_nregs) != to_size)
23253 return true;
23255 if (to_size == 8 && (8 * from_nregs) != from_size)
23256 return true;
23258 return false;
23260 else
23261 return false;
23264 if (TARGET_E500_DOUBLE
23265 && ((((to) == DFmode) + ((from) == DFmode)) == 1
23266 || (((to) == TFmode) + ((from) == TFmode)) == 1
23267 || (((to) == IFmode) + ((from) == IFmode)) == 1
23268 || (((to) == KFmode) + ((from) == KFmode)) == 1
23269 || (((to) == DDmode) + ((from) == DDmode)) == 1
23270 || (((to) == TDmode) + ((from) == TDmode)) == 1
23271 || (((to) == DImode) + ((from) == DImode)) == 1))
23272 return true;
23274 /* Since the VSX register set includes traditional floating point registers
23275 and altivec registers, just check for the size being different instead of
23276 trying to check whether the modes are vector modes. Otherwise it won't
23277 allow, say, DF and DI to change classes. For types like TFmode and TDmode
23278 that take 2 64-bit registers, rather than a single 128-bit register, don't
23279 allow subregs of those types to other 128-bit types. */
23280 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
23282 unsigned num_regs = (from_size + 15) / 16;
23283 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
23284 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
23285 return true;
23287 return (from_size != 8 && from_size != 16);
23290 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
23291 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
23292 return true;
23294 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
23295 && reg_classes_intersect_p (GENERAL_REGS, rclass))
23296 return true;
23298 return false;
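
/* Worked examples (illustrative, derived from the checks above): under
   VSX a DImode/DFmode subreg is fine because both occupy one 8-byte
   register, whereas 4-byte values may not change mode inside VSX
   registers at all.  */
#if 0
rs6000_cannot_change_mode_class (DImode, DFmode, VSX_REGS); /* false */
rs6000_cannot_change_mode_class (SImode, SFmode, VSX_REGS); /* true  */
#endif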
23301 /* Debug version of rs6000_cannot_change_mode_class. */
23302 static bool
23303 rs6000_debug_cannot_change_mode_class (machine_mode from,
23304 machine_mode to,
23305 enum reg_class rclass)
23307 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
23309 fprintf (stderr,
23310 "rs6000_cannot_change_mode_class, return %s, from = %s, "
23311 "to = %s, rclass = %s\n",
23312 ret ? "true" : "false",
23313 GET_MODE_NAME (from), GET_MODE_NAME (to),
23314 reg_class_names[rclass]);
23316 return ret;
23319 /* Return a string to do a move operation of 128 bits of data. */
23321 const char *
23322 rs6000_output_move_128bit (rtx operands[])
23324 rtx dest = operands[0];
23325 rtx src = operands[1];
23326 machine_mode mode = GET_MODE (dest);
23327 int dest_regno;
23328 int src_regno;
23329 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
23330 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
23332 if (REG_P (dest))
23334 dest_regno = REGNO (dest);
23335 dest_gpr_p = INT_REGNO_P (dest_regno);
23336 dest_fp_p = FP_REGNO_P (dest_regno);
23337 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
23338 dest_vsx_p = dest_fp_p | dest_vmx_p;
23340 else
23342 dest_regno = -1;
23343 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
23346 if (REG_P (src))
23348 src_regno = REGNO (src);
23349 src_gpr_p = INT_REGNO_P (src_regno);
23350 src_fp_p = FP_REGNO_P (src_regno);
23351 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
23352 src_vsx_p = src_fp_p | src_vmx_p;
23354 else
23356 src_regno = -1;
23357 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
23360 /* Register moves. */
23361 if (dest_regno >= 0 && src_regno >= 0)
23363 if (dest_gpr_p)
23365 if (src_gpr_p)
23366 return "#";
23368 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
23369 return (WORDS_BIG_ENDIAN
23370 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
23371 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
23373 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
23374 return "#";
23377 else if (TARGET_VSX && dest_vsx_p)
23379 if (src_vsx_p)
23380 return "xxlor %x0,%x1,%x1";
23382 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
23383 return (WORDS_BIG_ENDIAN
23384 ? "mtvsrdd %x0,%1,%L1"
23385 : "mtvsrdd %x0,%L1,%1");
23387 else if (TARGET_DIRECT_MOVE && src_gpr_p)
23388 return "#";
23391 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
23392 return "vor %0,%1,%1";
23394 else if (dest_fp_p && src_fp_p)
23395 return "#";
23398 /* Loads. */
23399 else if (dest_regno >= 0 && MEM_P (src))
23401 if (dest_gpr_p)
23403 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
23404 return "lq %0,%1";
23405 else
23406 return "#";
23409 else if (TARGET_ALTIVEC && dest_vmx_p
23410 && altivec_indexed_or_indirect_operand (src, mode))
23411 return "lvx %0,%y1";
23413 else if (TARGET_VSX && dest_vsx_p)
23415 if (mode_supports_vsx_dform_quad (mode)
23416 && quad_address_p (XEXP (src, 0), mode, true))
23417 return "lxv %x0,%1";
23419 else if (TARGET_P9_VECTOR)
23420 return "lxvx %x0,%y1";
23422 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
23423 return "lxvw4x %x0,%y1";
23425 else
23426 return "lxvd2x %x0,%y1";
23429 else if (TARGET_ALTIVEC && dest_vmx_p)
23430 return "lvx %0,%y1";
23432 else if (dest_fp_p)
23433 return "#";
23436 /* Stores. */
23437 else if (src_regno >= 0 && MEM_P (dest))
23439 if (src_gpr_p)
23441 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
23442 return "stq %1,%0";
23443 else
23444 return "#";
23447 else if (TARGET_ALTIVEC && src_vmx_p
23448 && altivec_indexed_or_indirect_operand (dest, mode))
23449 return "stvx %1,%y0";
23451 else if (TARGET_VSX && src_vsx_p)
23453 if (mode_supports_vsx_dform_quad (mode)
23454 && quad_address_p (XEXP (dest, 0), mode, true))
23455 return "stxv %x1,%0";
23457 else if (TARGET_P9_VECTOR)
23458 return "stxvx %x1,%y0";
23460 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
23461 return "stxvw4x %x1,%y0";
23463 else
23464 return "stxvd2x %x1,%y0";
23467 else if (TARGET_ALTIVEC && src_vmx_p)
23468 return "stvx %1,%y0";
23470 else if (src_fp_p)
23471 return "#";
23474 /* Constants. */
23475 else if (dest_regno >= 0
23476 && (GET_CODE (src) == CONST_INT
23477 || GET_CODE (src) == CONST_WIDE_INT
23478 || GET_CODE (src) == CONST_DOUBLE
23479 || GET_CODE (src) == CONST_VECTOR))
23481 if (dest_gpr_p)
23482 return "#";
23484 else if ((dest_vmx_p && TARGET_ALTIVEC)
23485 || (dest_vsx_p && TARGET_VSX))
23486 return output_vec_const_move (operands);
23489 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
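
/* Examples of the templates above (illustrative, assuming default
   register names on a big-endian target): a V2DImode copy of the GPR
   pair r4/r5 into vs34 under TARGET_DIRECT_MOVE_128 emits
   "mtvsrdd 34,4,5"; a VSX-to-VSX copy of vs35 into vs34 emits
   "xxlor 34,35,35"; a GPR-to-GPR copy returns "#" and is split into
   word-sized moves after reload.  */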
23492 /* Validate a 128-bit move. */
23493 bool
23494 rs6000_move_128bit_ok_p (rtx operands[])
23496 machine_mode mode = GET_MODE (operands[0]);
23497 return (gpc_reg_operand (operands[0], mode)
23498 || gpc_reg_operand (operands[1], mode));
23501 /* Return true if a 128-bit move needs to be split. */
23502 bool
23503 rs6000_split_128bit_ok_p (rtx operands[])
23505 if (!reload_completed)
23506 return false;
23508 if (!gpr_or_gpr_p (operands[0], operands[1]))
23509 return false;
23511 if (quad_load_store_p (operands[0], operands[1]))
23512 return false;
23514 return true;
23518 /* Given a comparison operation, return the bit number in CCR to test. We
23519 know this is a valid comparison.
23521 SCC_P is 1 if this is for an scc. That means that %D will have been
23522 used instead of %C, so the bits will be in different places.
23524 Return -1 if OP isn't a valid comparison for some reason. */
23526 int
23527 ccr_bit (rtx op, int scc_p)
23529 enum rtx_code code = GET_CODE (op);
23530 machine_mode cc_mode;
23531 int cc_regnum;
23532 int base_bit;
23533 rtx reg;
23535 if (!COMPARISON_P (op))
23536 return -1;
23538 reg = XEXP (op, 0);
23540 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
23542 cc_mode = GET_MODE (reg);
23543 cc_regnum = REGNO (reg);
23544 base_bit = 4 * (cc_regnum - CR0_REGNO);
23546 validate_condition_mode (code, cc_mode);
23548 /* When generating a sCOND operation, only positive conditions are
23549 allowed. */
23550 gcc_assert (!scc_p
23551 || code == EQ || code == GT || code == LT || code == UNORDERED
23552 || code == GTU || code == LTU);
23554 switch (code)
23556 case NE:
23557 return scc_p ? base_bit + 3 : base_bit + 2;
23558 case EQ:
23559 return base_bit + 2;
23560 case GT: case GTU: case UNLE:
23561 return base_bit + 1;
23562 case LT: case LTU: case UNGE:
23563 return base_bit;
23564 case ORDERED: case UNORDERED:
23565 return base_bit + 3;
23567 case GE: case GEU:
23568 /* If scc, we will have done a cror to put the bit in the
23569 unordered position. So test that bit. For integer, this is ! LT
23570 unless this is an scc insn. */
23571 return scc_p ? base_bit + 3 : base_bit;
23573 case LE: case LEU:
23574 return scc_p ? base_bit + 3 : base_bit + 1;
23576 default:
23577 gcc_unreachable ();
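
/* Worked example (illustrative): for a GT test of CR field 2, i.e.
   cc_regnum == CR0_REGNO + 2, base_bit is 4 * 2 = 8 and the function
   returns base_bit + 1 = 9; with SCC_P set, a GE test instead returns
   the unordered position, base_bit + 3 = 11, because the cror emitted
   for the scc has already copied the result there.  */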
23581 /* Return the GOT register. */
23583 rtx
23584 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
23586 /* The second flow pass currently (June 1999) can't update
23587 regs_ever_live without disturbing other parts of the compiler, so
23588 update it here to make the prolog/epilogue code happy. */
23589 if (!can_create_pseudo_p ()
23590 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23591 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
23593 crtl->uses_pic_offset_table = 1;
23595 return pic_offset_table_rtx;
23598 static rs6000_stack_t stack_info;
23600 /* Function to init struct machine_function.
23601 This will be called, via a pointer variable,
23602 from push_function_context. */
23604 static struct machine_function *
23605 rs6000_init_machine_status (void)
23607 stack_info.reload_completed = 0;
23608 return ggc_cleared_alloc<machine_function> ();
23611 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
23613 /* Write out a function code label. */
23615 void
23616 rs6000_output_function_entry (FILE *file, const char *fname)
23618 if (fname[0] != '.')
23620 switch (DEFAULT_ABI)
23622 default:
23623 gcc_unreachable ();
23625 case ABI_AIX:
23626 if (DOT_SYMBOLS)
23627 putc ('.', file);
23628 else
23629 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
23630 break;
23632 case ABI_ELFv2:
23633 case ABI_V4:
23634 case ABI_DARWIN:
23635 break;
23639 RS6000_OUTPUT_BASENAME (file, fname);
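
/* For example (illustrative): under the AIX ABI with dot symbols the
   entry point of "foo" is written as ".foo", while the ELFv2, V4 and
   Darwin ABIs emit the plain name.  */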
23642 /* Print an operand. Recognize special options, documented below. */
23644 #if TARGET_ELF
23645 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
23646 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
23647 #else
23648 #define SMALL_DATA_RELOC "sda21"
23649 #define SMALL_DATA_REG 0
23650 #endif
23652 void
23653 print_operand (FILE *file, rtx x, int code)
23655 int i;
23656 unsigned HOST_WIDE_INT uval;
23658 switch (code)
23660 /* %a is output_address. */
23662 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
23663 output_operand. */
23665 case 'D':
23666 /* Like 'J' but get to the GT bit only. */
23667 gcc_assert (REG_P (x));
23669 /* Bit 1 is GT bit. */
23670 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
23672 /* Add one for shift count in rlinm for scc. */
23673 fprintf (file, "%d", i + 1);
23674 return;
23676 case 'e':
23677 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
23678 if (! INT_P (x))
23680 output_operand_lossage ("invalid %%e value");
23681 return;
23684 uval = INTVAL (x);
23685 if ((uval & 0xffff) == 0 && uval != 0)
23686 putc ('s', file);
23687 return;
23689 case 'E':
23690 /* X is a CR register. Print the number of the EQ bit of the CR */
23691 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23692 output_operand_lossage ("invalid %%E value");
23693 else
23694 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
23695 return;
23697 case 'f':
23698 /* X is a CR register. Print the shift count needed to move it
23699 to the high-order four bits. */
23700 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23701 output_operand_lossage ("invalid %%f value");
23702 else
23703 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
23704 return;
23706 case 'F':
23707 /* Similar, but print the count for the rotate in the opposite
23708 direction. */
23709 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23710 output_operand_lossage ("invalid %%F value");
23711 else
23712 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
23713 return;
23715 case 'G':
23716 /* X is a constant integer. If it is negative, print "m",
23717 otherwise print "z". This is to make an aze or ame insn. */
23718 if (GET_CODE (x) != CONST_INT)
23719 output_operand_lossage ("invalid %%G value");
23720 else if (INTVAL (x) >= 0)
23721 putc ('z', file);
23722 else
23723 putc ('m', file);
23724 return;
23726 case 'h':
23727 /* If constant, output low-order five bits. Otherwise, write
23728 normally. */
23729 if (INT_P (x))
23730 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
23731 else
23732 print_operand (file, x, 0);
23733 return;
23735 case 'H':
23736 /* If constant, output low-order six bits. Otherwise, write
23737 normally. */
23738 if (INT_P (x))
23739 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
23740 else
23741 print_operand (file, x, 0);
23742 return;
23744 case 'I':
23745 /* Print `i' if this is a constant, else nothing. */
23746 if (INT_P (x))
23747 putc ('i', file);
23748 return;
23750 case 'j':
23751 /* Write the bit number in CCR for jump. */
23752 i = ccr_bit (x, 0);
23753 if (i == -1)
23754 output_operand_lossage ("invalid %%j code");
23755 else
23756 fprintf (file, "%d", i);
23757 return;
23759 case 'J':
23760 /* Similar, but add one for shift count in rlinm for scc and pass
23761 scc flag to `ccr_bit'. */
23762 i = ccr_bit (x, 1);
23763 if (i == -1)
23764 output_operand_lossage ("invalid %%J code");
23765 else
23766 /* If we want bit 31, write a shift count of zero, not 32. */
23767 fprintf (file, "%d", i == 31 ? 0 : i + 1);
23768 return;
23770 case 'k':
23771 /* X must be a constant. Write the 1's complement of the
23772 constant. */
23773 if (! INT_P (x))
23774 output_operand_lossage ("invalid %%k value");
23775 else
23776 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
23777 return;
23779 case 'K':
23780 /* X must be a symbolic constant on ELF. Write an
23781 expression suitable for an 'addi' that adds in the low 16
23782 bits of the MEM. */
23783 if (GET_CODE (x) == CONST)
23785 if (GET_CODE (XEXP (x, 0)) != PLUS
23786 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
23787 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
23788 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
23789 output_operand_lossage ("invalid %%K value");
23791 print_operand_address (file, x);
23792 fputs ("@l", file);
23793 return;
23795 /* %l is output_asm_label. */
23797 case 'L':
23798 /* Write second word of DImode or DFmode reference. Works on register
23799 or non-indexed memory only. */
23800 if (REG_P (x))
23801 fputs (reg_names[REGNO (x) + 1], file);
23802 else if (MEM_P (x))
23804 machine_mode mode = GET_MODE (x);
23805 /* Handle possible auto-increment. Since it is pre-increment and
23806 we have already done it, we can just use an offset of one word. */
23807 if (GET_CODE (XEXP (x, 0)) == PRE_INC
23808 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
23809 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23810 UNITS_PER_WORD));
23811 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23812 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23813 UNITS_PER_WORD));
23814 else
23815 output_address (mode, XEXP (adjust_address_nv (x, SImode,
23816 UNITS_PER_WORD),
23817 0));
23819 if (small_data_operand (x, GET_MODE (x)))
23820 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23821 reg_names[SMALL_DATA_REG]);
23823 return;
23825 case 'N':
23826 /* Write the number of elements in the vector times 4. */
23827 if (GET_CODE (x) != PARALLEL)
23828 output_operand_lossage ("invalid %%N value");
23829 else
23830 fprintf (file, "%d", XVECLEN (x, 0) * 4);
23831 return;
23833 case 'O':
23834 /* Similar, but subtract 1 first. */
23835 if (GET_CODE (x) != PARALLEL)
23836 output_operand_lossage ("invalid %%O value");
23837 else
23838 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
23839 return;
23841 case 'p':
23842 /* X is a CONST_INT that is a power of two. Output the logarithm. */
23843 if (! INT_P (x)
23844 || INTVAL (x) < 0
23845 || (i = exact_log2 (INTVAL (x))) < 0)
23846 output_operand_lossage ("invalid %%p value");
23847 else
23848 fprintf (file, "%d", i);
23849 return;
23851 case 'P':
23852 /* The operand must be an indirect memory reference. The result
23853 is the register name. */
23854 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
23855 || REGNO (XEXP (x, 0)) >= 32)
23856 output_operand_lossage ("invalid %%P value");
23857 else
23858 fputs (reg_names[REGNO (XEXP (x, 0))], file);
23859 return;
23861 case 'q':
23862 /* This outputs the logical code corresponding to a boolean
23863 expression. The expression may have one or both operands
23864 negated (if one, only the first one). For condition register
23865 logical operations, it will also treat the negated
23866 CR codes as NOTs, but not handle NOTs of them. */
23868 const char *const *t = 0;
23869 const char *s;
23870 enum rtx_code code = GET_CODE (x);
23871 static const char * const tbl[3][3] = {
23872 { "and", "andc", "nor" },
23873 { "or", "orc", "nand" },
23874 { "xor", "eqv", "xor" } };
23876 if (code == AND)
23877 t = tbl[0];
23878 else if (code == IOR)
23879 t = tbl[1];
23880 else if (code == XOR)
23881 t = tbl[2];
23882 else
23883 output_operand_lossage ("invalid %%q value");
23885 if (GET_CODE (XEXP (x, 0)) != NOT)
23886 s = t[0];
23887 else
23889 if (GET_CODE (XEXP (x, 1)) == NOT)
23890 s = t[2];
23891 else
23892 s = t[1];
23895 fputs (s, file);
23897 return;
23899 case 'Q':
23900 if (! TARGET_MFCRF)
23901 return;
23902 fputc (',', file);
23903 /* FALLTHRU */
23905 case 'R':
23906 /* X is a CR register. Print the mask for `mtcrf'. */
23907 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23908 output_operand_lossage ("invalid %%R value");
23909 else
23910 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
23911 return;
23913 case 's':
23914 /* Low 5 bits of 32 - value */
23915 if (! INT_P (x))
23916 output_operand_lossage ("invalid %%s value");
23917 else
23918 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
23919 return;
23921 case 't':
23922 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
23923 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
23925 /* Bit 3 is OV bit. */
23926 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
23928 /* If we want bit 31, write a shift count of zero, not 32. */
23929 fprintf (file, "%d", i == 31 ? 0 : i + 1);
23930 return;
23932 case 'T':
23933 /* Print the symbolic name of a branch target register. */
23934 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
23935 && REGNO (x) != CTR_REGNO))
23936 output_operand_lossage ("invalid %%T value");
23937 else if (REGNO (x) == LR_REGNO)
23938 fputs ("lr", file);
23939 else
23940 fputs ("ctr", file);
23941 return;
23943 case 'u':
23944 /* High-order or low-order 16 bits of constant, whichever is non-zero,
23945 for use in unsigned operand. */
23946 if (! INT_P (x))
23948 output_operand_lossage ("invalid %%u value");
23949 return;
23952 uval = INTVAL (x);
23953 if ((uval & 0xffff) == 0)
23954 uval >>= 16;
23956 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
23957 return;
23959 case 'v':
23960 /* High-order 16 bits of constant for use in signed operand. */
23961 if (! INT_P (x))
23962 output_operand_lossage ("invalid %%v value");
23963 else
23964 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
23965 (INTVAL (x) >> 16) & 0xffff);
23966 return;
23968 case 'U':
23969 /* Print `u' if this has an auto-increment or auto-decrement. */
23970 if (MEM_P (x)
23971 && (GET_CODE (XEXP (x, 0)) == PRE_INC
23972 || GET_CODE (XEXP (x, 0)) == PRE_DEC
23973 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
23974 putc ('u', file);
23975 return;
23977 case 'V':
23978 /* Print the trap code for this operand. */
23979 switch (GET_CODE (x))
23981 case EQ:
23982 fputs ("eq", file); /* 4 */
23983 break;
23984 case NE:
23985 fputs ("ne", file); /* 24 */
23986 break;
23987 case LT:
23988 fputs ("lt", file); /* 16 */
23989 break;
23990 case LE:
23991 fputs ("le", file); /* 20 */
23992 break;
23993 case GT:
23994 fputs ("gt", file); /* 8 */
23995 break;
23996 case GE:
23997 fputs ("ge", file); /* 12 */
23998 break;
23999 case LTU:
24000 fputs ("llt", file); /* 2 */
24001 break;
24002 case LEU:
24003 fputs ("lle", file); /* 6 */
24004 break;
24005 case GTU:
24006 fputs ("lgt", file); /* 1 */
24007 break;
24008 case GEU:
24009 fputs ("lge", file); /* 5 */
24010 break;
24011 default:
24012 gcc_unreachable ();
24014 break;
24016 case 'w':
24017 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
24018 normally. */
24019 if (INT_P (x))
24020 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
24021 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
24022 else
24023 print_operand (file, x, 0);
24024 return;
24026 case 'x':
24027 /* X is a FPR or Altivec register used in a VSX context. */
24028 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
24029 output_operand_lossage ("invalid %%x value");
24030 else
24032 int reg = REGNO (x);
24033 int vsx_reg = (FP_REGNO_P (reg)
24034 ? reg - 32
24035 : reg - FIRST_ALTIVEC_REGNO + 32);
24037 #ifdef TARGET_REGNAMES
24038 if (TARGET_REGNAMES)
24039 fprintf (file, "%%vs%d", vsx_reg);
24040 else
24041 #endif
24042 fprintf (file, "%d", vsx_reg);
24044 return;
24046 case 'X':
24047 if (MEM_P (x)
24048 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
24049 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
24050 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
24051 putc ('x', file);
24052 return;
24054 case 'Y':
24055 /* Like 'L', for third word of TImode/PTImode */
24056 if (REG_P (x))
24057 fputs (reg_names[REGNO (x) + 2], file);
24058 else if (MEM_P (x))
24060 machine_mode mode = GET_MODE (x);
24061 if (GET_CODE (XEXP (x, 0)) == PRE_INC
24062 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
24063 output_address (mode, plus_constant (Pmode,
24064 XEXP (XEXP (x, 0), 0), 8));
24065 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24066 output_address (mode, plus_constant (Pmode,
24067 XEXP (XEXP (x, 0), 0), 8));
24068 else
24069 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
24070 if (small_data_operand (x, GET_MODE (x)))
24071 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24072 reg_names[SMALL_DATA_REG]);
24074 return;
24076 case 'z':
24077 /* X is a SYMBOL_REF. Write out the name preceded by a
24078 period and without any trailing data in brackets. Used for function
24079 names. If we are configured for System V (or the embedded ABI) on
24080 the PowerPC, do not emit the period, since those systems do not use
24081 TOCs and the like. */
24082 gcc_assert (GET_CODE (x) == SYMBOL_REF);
24084 /* For macho, check to see if we need a stub. */
24085 if (TARGET_MACHO)
24087 const char *name = XSTR (x, 0);
24088 #if TARGET_MACHO
24089 if (darwin_emit_branch_islands
24090 && MACHOPIC_INDIRECT
24091 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
24092 name = machopic_indirection_name (x, /*stub_p=*/true);
24093 #endif
24094 assemble_name (file, name);
24096 else if (!DOT_SYMBOLS)
24097 assemble_name (file, XSTR (x, 0));
24098 else
24099 rs6000_output_function_entry (file, XSTR (x, 0));
24100 return;
24102 case 'Z':
24103 /* Like 'L', for last word of TImode/PTImode. */
24104 if (REG_P (x))
24105 fputs (reg_names[REGNO (x) + 3], file);
24106 else if (MEM_P (x))
24108 machine_mode mode = GET_MODE (x);
24109 if (GET_CODE (XEXP (x, 0)) == PRE_INC
24110 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
24111 output_address (mode, plus_constant (Pmode,
24112 XEXP (XEXP (x, 0), 0), 12));
24113 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24114 output_address (mode, plus_constant (Pmode,
24115 XEXP (XEXP (x, 0), 0), 12));
24116 else
24117 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
24118 if (small_data_operand (x, GET_MODE (x)))
24119 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24120 reg_names[SMALL_DATA_REG]);
24122 return;
24124 /* Print AltiVec or SPE memory operand. */
24125 case 'y':
24127 rtx tmp;
24129 gcc_assert (MEM_P (x));
24131 tmp = XEXP (x, 0);
24133 /* Ugly hack because %y is overloaded. */
24134 if ((TARGET_SPE || TARGET_E500_DOUBLE)
24135 && (GET_MODE_SIZE (GET_MODE (x)) == 8
24136 || FLOAT128_2REG_P (GET_MODE (x))
24137 || GET_MODE (x) == TImode
24138 || GET_MODE (x) == PTImode))
24140 /* Handle [reg]. */
24141 if (REG_P (tmp))
24143 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
24144 break;
24146 /* Handle [reg+UIMM]. */
24147 else if (GET_CODE (tmp) == PLUS &&
24148 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
24150 int x;
24152 gcc_assert (REG_P (XEXP (tmp, 0)));
24154 x = INTVAL (XEXP (tmp, 1));
24155 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
24156 break;
24159 /* Fall through. Must be [reg+reg]. */
24161 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
24162 && GET_CODE (tmp) == AND
24163 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
24164 && INTVAL (XEXP (tmp, 1)) == -16)
24165 tmp = XEXP (tmp, 0);
24166 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
24167 && GET_CODE (tmp) == PRE_MODIFY)
24168 tmp = XEXP (tmp, 1);
24169 if (REG_P (tmp))
24170 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
24171 else
24173 if (GET_CODE (tmp) != PLUS
24174 || !REG_P (XEXP (tmp, 0))
24175 || !REG_P (XEXP (tmp, 1)))
24177 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
24178 break;
24181 if (REGNO (XEXP (tmp, 0)) == 0)
24182 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
24183 reg_names[ REGNO (XEXP (tmp, 0)) ]);
24184 else
24185 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
24186 reg_names[ REGNO (XEXP (tmp, 1)) ]);
24188 break;
24191 case 0:
24192 if (REG_P (x))
24193 fprintf (file, "%s", reg_names[REGNO (x)]);
24194 else if (MEM_P (x))
24196 /* We need to handle PRE_INC and PRE_DEC here, since we need to
24197 know the width from the mode. */
24198 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
24199 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
24200 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
24201 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
24202 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
24203 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
24204 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24205 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
24206 else
24207 output_address (GET_MODE (x), XEXP (x, 0));
24209 else
24211 if (toc_relative_expr_p (x, false))
24212 /* This hack along with a corresponding hack in
24213 rs6000_output_addr_const_extra arranges to output addends
24214 where the assembler expects to find them. eg.
24215 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
24216 without this hack would be output as "x@toc+4". We
24217 want "x+4@toc". */
24218 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
24219 else
24220 output_addr_const (file, x);
24222 return;
24224 case '&':
24225 if (const char *name = get_some_local_dynamic_name ())
24226 assemble_name (file, name);
24227 else
24228 output_operand_lossage ("'%%&' used without any "
24229 "local dynamic TLS references");
24230 return;
24232 default:
24233 output_operand_lossage ("invalid %%xn code");
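
/* Illustrative expansions of the codes above (assumptions, using default
   register names): for a DImode operand 0 held in the register pair
   r10/r11 on a 32-bit target, "%0" prints "10" and "%L0" prints "11";
   for operand 1 in floating register f2 used in a VSX context, "%x1"
   prints "2" (VSX register vs2).  */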
24237 /* Print the address of an operand. */
24239 void
24240 print_operand_address (FILE *file, rtx x)
24242 if (REG_P (x))
24243 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
24244 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
24245 || GET_CODE (x) == LABEL_REF)
24247 output_addr_const (file, x);
24248 if (small_data_operand (x, GET_MODE (x)))
24249 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24250 reg_names[SMALL_DATA_REG]);
24251 else
24252 gcc_assert (!TARGET_TOC);
24254 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
24255 && REG_P (XEXP (x, 1)))
24257 if (REGNO (XEXP (x, 0)) == 0)
24258 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
24259 reg_names[ REGNO (XEXP (x, 0)) ]);
24260 else
24261 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
24262 reg_names[ REGNO (XEXP (x, 1)) ]);
24264 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
24265 && GET_CODE (XEXP (x, 1)) == CONST_INT)
24266 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
24267 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
24268 #if TARGET_MACHO
24269 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
24270 && CONSTANT_P (XEXP (x, 1)))
24272 fprintf (file, "lo16(");
24273 output_addr_const (file, XEXP (x, 1));
24274 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
24276 #endif
24277 #if TARGET_ELF
24278 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
24279 && CONSTANT_P (XEXP (x, 1)))
24281 output_addr_const (file, XEXP (x, 1));
24282 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
24284 #endif
24285 else if (toc_relative_expr_p (x, false))
24287 /* This hack along with a corresponding hack in
24288 rs6000_output_addr_const_extra arranges to output addends
24289 where the assembler expects to find them. eg.
24290 (lo_sum (reg 9)
24291 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
24292 without this hack would be output as "x@toc+8@l(9)". We
24293 want "x+8@toc@l(9)". */
24294 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
24295 if (GET_CODE (x) == LO_SUM)
24296 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
24297 else
24298 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
24300 else
24301 gcc_unreachable ();
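
/* Illustrative renderings (not part of the original source), assuming
   default register names:

	(reg 9)                        ->  "0(9)"
	(plus (reg 9) (const_int 16))  ->  "16(9)"
	(plus (reg 9) (reg 10))        ->  "9,10"
	(plus (reg 0) (reg 10))        ->  "10,0"  (r0 cannot be the base)  */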
24304 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
24306 static bool
24307 rs6000_output_addr_const_extra (FILE *file, rtx x)
24309 if (GET_CODE (x) == UNSPEC)
24310 switch (XINT (x, 1))
24312 case UNSPEC_TOCREL:
24313 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
24314 && REG_P (XVECEXP (x, 0, 1))
24315 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
24316 output_addr_const (file, XVECEXP (x, 0, 0));
24317 if (x == tocrel_base && tocrel_offset != const0_rtx)
24319 if (INTVAL (tocrel_offset) >= 0)
24320 fprintf (file, "+");
24321 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
24323 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
24325 putc ('-', file);
24326 assemble_name (file, toc_label_name);
24327 need_toc_init = 1;
24329 else if (TARGET_ELF)
24330 fputs ("@toc", file);
24331 return true;
24333 #if TARGET_MACHO
24334 case UNSPEC_MACHOPIC_OFFSET:
24335 output_addr_const (file, XVECEXP (x, 0, 0));
24336 putc ('-', file);
24337 machopic_output_function_base_name (file);
24338 return true;
24339 #endif
24341 return false;
24344 /* Target hook for assembling integer objects. The PowerPC version has
24345 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
24346 is defined. It also needs to handle DI-mode objects on 64-bit
24347 targets. */
24349 static bool
24350 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
24352 #ifdef RELOCATABLE_NEEDS_FIXUP
24353 /* Special handling for SI values. */
24354 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
24356 static int recurse = 0;
24358 /* For -mrelocatable, we mark all addresses that need to be fixed up in
24359 the .fixup section. Since the TOC section is already relocated, we
24360 don't need to mark it here. We used to skip the text section, but it
24361 should never be valid for relocated addresses to be placed in the text
24362 section. */
24363 if (DEFAULT_ABI == ABI_V4
24364 && (TARGET_RELOCATABLE || flag_pic > 1)
24365 && in_section != toc_section
24366 && !recurse
24367 && !CONST_SCALAR_INT_P (x)
24368 && CONSTANT_P (x))
24370 char buf[256];
24372 recurse = 1;
24373 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
24374 fixuplabelno++;
24375 ASM_OUTPUT_LABEL (asm_out_file, buf);
24376 fprintf (asm_out_file, "\t.long\t(");
24377 output_addr_const (asm_out_file, x);
24378 fprintf (asm_out_file, ")@fixup\n");
24379 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
24380 ASM_OUTPUT_ALIGN (asm_out_file, 2);
24381 fprintf (asm_out_file, "\t.long\t");
24382 assemble_name (asm_out_file, buf);
24383 fprintf (asm_out_file, "\n\t.previous\n");
24384 recurse = 0;
24385 return true;
24387 /* Remove initial .'s to turn a -mcall-aixdesc function
24388 address into the address of the descriptor, not the function
24389 itself. */
24390 else if (GET_CODE (x) == SYMBOL_REF
24391 && XSTR (x, 0)[0] == '.'
24392 && DEFAULT_ABI == ABI_AIX)
24394 const char *name = XSTR (x, 0);
24395 while (*name == '.')
24396 name++;
24398 fprintf (asm_out_file, "\t.long\t%s\n", name);
24399 return true;
24402 #endif /* RELOCATABLE_NEEDS_FIXUP */
24403 return default_assemble_integer (x, size, aligned_p);
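
/* Shape of the -mrelocatable fixup output above (illustrative; the label
   number and symbol are placeholders):

	.LCP0:
		.long	(sym)@fixup
		.section	".fixup","aw"
		.align 2
		.long	.LCP0
		.previous  */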
24406 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
24407 /* Emit an assembler directive to set symbol visibility for DECL to
24408 VISIBILITY_TYPE. */
24410 static void
24411 rs6000_assemble_visibility (tree decl, int vis)
24413 if (TARGET_XCOFF)
24414 return;
24416 /* Functions need to have their entry point symbol visibility set as
24417 well as their descriptor symbol visibility. */
24418 if (DEFAULT_ABI == ABI_AIX
24419 && DOT_SYMBOLS
24420 && TREE_CODE (decl) == FUNCTION_DECL)
24422 static const char * const visibility_types[] = {
24423 NULL, "protected", "hidden", "internal"
24426 const char *name, *type;
24428 name = ((* targetm.strip_name_encoding)
24429 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
24430 type = visibility_types[vis];
24432 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
24433 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
24435 else
24436 default_assemble_visibility (decl, vis);
24438 #endif
24440 enum rtx_code
24441 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
24443 /* Reversal of FP compares takes care -- an ordered compare
24444 becomes an unordered compare and vice versa. */
24445 if (mode == CCFPmode
24446 && (!flag_finite_math_only
24447 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
24448 || code == UNEQ || code == LTGT))
24449 return reverse_condition_maybe_unordered (code);
24450 else
24451 return reverse_condition (code);
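
/* Example (illustrative): reversing GE in CCFPmode yields UNLT rather
   than LT, so the reversed branch is also taken when the operands
   compare unordered; in CCmode the plain reversal to LT is correct.  */
#if 0
rs6000_reverse_condition (CCFPmode, GE); /* UNLT */
rs6000_reverse_condition (CCmode, GE);   /* LT   */
#endif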
24454 /* Generate a compare for CODE. Return a brand-new rtx that
24455 represents the result of the compare. */
24457 static rtx
24458 rs6000_generate_compare (rtx cmp, machine_mode mode)
24460 machine_mode comp_mode;
24461 rtx compare_result;
24462 enum rtx_code code = GET_CODE (cmp);
24463 rtx op0 = XEXP (cmp, 0);
24464 rtx op1 = XEXP (cmp, 1);
24466 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
24467 comp_mode = CCmode;
24468 else if (FLOAT_MODE_P (mode))
24469 comp_mode = CCFPmode;
24470 else if (code == GTU || code == LTU
24471 || code == GEU || code == LEU)
24472 comp_mode = CCUNSmode;
24473 else if ((code == EQ || code == NE)
24474 && unsigned_reg_p (op0)
24475 && (unsigned_reg_p (op1)
24476 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
24477 /* These are unsigned values; perhaps there will be a later
24478 ordering compare that can be shared with this one. */
24479 comp_mode = CCUNSmode;
24480 else
24481 comp_mode = CCmode;
24483 /* If we have an unsigned compare, make sure we don't have a signed value as
24484 an immediate. */
24485 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
24486 && INTVAL (op1) < 0)
24488 op0 = copy_rtx_if_shared (op0);
24489 op1 = force_reg (GET_MODE (op0), op1);
24490 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
24493 /* First, the compare. */
24494 compare_result = gen_reg_rtx (comp_mode);
24496 /* E500 FP compare instructions on the GPRs. Yuck! */
24497 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
24498 && FLOAT_MODE_P (mode))
24500 rtx cmp, or_result, compare_result2;
24501 machine_mode op_mode = GET_MODE (op0);
24502 bool reverse_p;
24504 if (op_mode == VOIDmode)
24505 op_mode = GET_MODE (op1);
24507 /* First reverse the condition codes that aren't directly supported. */
24508 switch (code)
24510 case NE:
24511 case UNLT:
24512 case UNLE:
24513 case UNGT:
24514 case UNGE:
24515 code = reverse_condition_maybe_unordered (code);
24516 reverse_p = true;
24517 break;
24519 case EQ:
24520 case LT:
24521 case LE:
24522 case GT:
24523 case GE:
24524 reverse_p = false;
24525 break;
24527 default:
24528 gcc_unreachable ();
24531 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
24532 This explains the following mess. */
24534 switch (code)
24536 case EQ:
24537 switch (op_mode)
24539 case SFmode:
24540 cmp = (flag_finite_math_only && !flag_trapping_math)
24541 ? gen_tstsfeq_gpr (compare_result, op0, op1)
24542 : gen_cmpsfeq_gpr (compare_result, op0, op1);
24543 break;
24545 case DFmode:
24546 cmp = (flag_finite_math_only && !flag_trapping_math)
24547 ? gen_tstdfeq_gpr (compare_result, op0, op1)
24548 : gen_cmpdfeq_gpr (compare_result, op0, op1);
24549 break;
24551 case TFmode:
24552 case IFmode:
24553 case KFmode:
24554 cmp = (flag_finite_math_only && !flag_trapping_math)
24555 ? gen_tsttfeq_gpr (compare_result, op0, op1)
24556 : gen_cmptfeq_gpr (compare_result, op0, op1);
24557 break;
24559 default:
24560 gcc_unreachable ();
24562 break;
24564 case GT:
24565 case GE:
24566 switch (op_mode)
24568 case SFmode:
24569 cmp = (flag_finite_math_only && !flag_trapping_math)
24570 ? gen_tstsfgt_gpr (compare_result, op0, op1)
24571 : gen_cmpsfgt_gpr (compare_result, op0, op1);
24572 break;
24574 case DFmode:
24575 cmp = (flag_finite_math_only && !flag_trapping_math)
24576 ? gen_tstdfgt_gpr (compare_result, op0, op1)
24577 : gen_cmpdfgt_gpr (compare_result, op0, op1);
24578 break;
24580 case TFmode:
24581 case IFmode:
24582 case KFmode:
24583 cmp = (flag_finite_math_only && !flag_trapping_math)
24584 ? gen_tsttfgt_gpr (compare_result, op0, op1)
24585 : gen_cmptfgt_gpr (compare_result, op0, op1);
24586 break;
24588 default:
24589 gcc_unreachable ();
24591 break;
24593 case LT:
24594 case LE:
24595 switch (op_mode)
24597 case SFmode:
24598 cmp = (flag_finite_math_only && !flag_trapping_math)
24599 ? gen_tstsflt_gpr (compare_result, op0, op1)
24600 : gen_cmpsflt_gpr (compare_result, op0, op1);
24601 break;
24603 case DFmode:
24604 cmp = (flag_finite_math_only && !flag_trapping_math)
24605 ? gen_tstdflt_gpr (compare_result, op0, op1)
24606 : gen_cmpdflt_gpr (compare_result, op0, op1);
24607 break;
24609 case TFmode:
24610 case IFmode:
24611 case KFmode:
24612 cmp = (flag_finite_math_only && !flag_trapping_math)
24613 ? gen_tsttflt_gpr (compare_result, op0, op1)
24614 : gen_cmptflt_gpr (compare_result, op0, op1);
24615 break;
24617 default:
24618 gcc_unreachable ();
24620 break;
24622 default:
24623 gcc_unreachable ();
24626 /* Synthesize LE and GE from LT/GT || EQ. */
24627 if (code == LE || code == GE)
24629 emit_insn (cmp);
24631 compare_result2 = gen_reg_rtx (CCFPmode);
24633 /* Do the EQ. */
24634 switch (op_mode)
24636 case SFmode:
24637 cmp = (flag_finite_math_only && !flag_trapping_math)
24638 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
24639 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
24640 break;
24642 case DFmode:
24643 cmp = (flag_finite_math_only && !flag_trapping_math)
24644 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
24645 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
24646 break;
24648 case TFmode:
24649 case IFmode:
24650 case KFmode:
24651 cmp = (flag_finite_math_only && !flag_trapping_math)
24652 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
24653 : gen_cmptfeq_gpr (compare_result2, op0, op1);
24654 break;
24656 default:
24657 gcc_unreachable ();
24660 emit_insn (cmp);
24662 /* OR them together. */
24663 or_result = gen_reg_rtx (CCFPmode);
24664 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
24665 compare_result2);
24666 compare_result = or_result;
24669 code = reverse_p ? NE : EQ;
24671 emit_insn (cmp);
24674 /* IEEE 128-bit support in VSX registers when we do not have hardware
24675 support. */
24676 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
24678 rtx libfunc = NULL_RTX;
24679 bool check_nan = false;
24680 rtx dest;
24682 switch (code)
24684 case EQ:
24685 case NE:
24686 libfunc = optab_libfunc (eq_optab, mode);
24687 break;
24689 case GT:
24690 case GE:
24691 libfunc = optab_libfunc (ge_optab, mode);
24692 break;
24694 case LT:
24695 case LE:
24696 libfunc = optab_libfunc (le_optab, mode);
24697 break;
24699 case UNORDERED:
24700 case ORDERED:
24701 libfunc = optab_libfunc (unord_optab, mode);
24702 code = (code == UNORDERED) ? NE : EQ;
24703 break;
24705 case UNGE:
24706 case UNGT:
24707 check_nan = true;
24708 libfunc = optab_libfunc (ge_optab, mode);
24709 code = (code == UNGE) ? GE : GT;
24710 break;
24712 case UNLE:
24713 case UNLT:
24714 check_nan = true;
24715 libfunc = optab_libfunc (le_optab, mode);
24716 code = (code == UNLE) ? LE : LT;
24717 break;
24719 case UNEQ:
24720 case LTGT:
24721 check_nan = true;
24722 libfunc = optab_libfunc (eq_optab, mode);
24723 code = (code == UNEQ) ? EQ : NE;
24724 break;
24726 default:
24727 gcc_unreachable ();
24730 gcc_assert (libfunc);
24732 if (!check_nan)
24733 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
24734 SImode, 2, op0, mode, op1, mode);
24736 /* The library signals an exception for signalling NaNs, so we need to
24737 handle isgreater, etc. by first checking isordered. */
24738 else
24740 rtx ne_rtx, normal_dest, unord_dest;
24741 rtx unord_func = optab_libfunc (unord_optab, mode);
24742 rtx join_label = gen_label_rtx ();
24743 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
24744 rtx unord_cmp = gen_reg_rtx (comp_mode);
24747 /* Test for either value being a NaN. */
24748 gcc_assert (unord_func);
24749 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
24750 SImode, 2, op0, mode, op1,
24751 mode);
24753 /* Set the result to 1 if either value is a NaN, and jump to the
24754 join label. */
24755 dest = gen_reg_rtx (SImode);
24756 emit_move_insn (dest, const1_rtx);
24757 emit_insn (gen_rtx_SET (unord_cmp,
24758 gen_rtx_COMPARE (comp_mode, unord_dest,
24759 const0_rtx)));
24761 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
24762 emit_jump_insn (gen_rtx_SET (pc_rtx,
24763 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
24764 join_ref,
24765 pc_rtx)));
24767 /* Do the normal comparison, knowing that the values are not
24768 NaNs. */
24769 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
24770 SImode, 2, op0, mode, op1,
24771 mode);
24773 emit_insn (gen_cstoresi4 (dest,
24774 gen_rtx_fmt_ee (code, SImode, normal_dest,
24775 const0_rtx),
24776 normal_dest, const0_rtx));
24778 /* Join the NaN and non-NaN paths. Compare dest against 0. */
24779 emit_label (join_label);
24780 code = NE;
24783 emit_insn (gen_rtx_SET (compare_result,
24784 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
24787 else
24789 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
24790 CLOBBERs to match cmptf_internal2 pattern. */
24791 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
24792 && FLOAT128_IBM_P (GET_MODE (op0))
24793 && TARGET_HARD_FLOAT && TARGET_FPRS)
24794 emit_insn (gen_rtx_PARALLEL (VOIDmode,
24795 gen_rtvec (10,
24796 gen_rtx_SET (compare_result,
24797 gen_rtx_COMPARE (comp_mode, op0, op1)),
24798 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24799 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24800 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24801 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24802 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24803 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24804 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24805 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24806 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
24807 else if (GET_CODE (op1) == UNSPEC
24808 && XINT (op1, 1) == UNSPEC_SP_TEST)
24810 rtx op1b = XVECEXP (op1, 0, 0);
24811 comp_mode = CCEQmode;
24812 compare_result = gen_reg_rtx (CCEQmode);
24813 if (TARGET_64BIT)
24814 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
24815 else
24816 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
24818 else
24819 emit_insn (gen_rtx_SET (compare_result,
24820 gen_rtx_COMPARE (comp_mode, op0, op1)));
24823 /* Some kinds of FP comparisons need an OR operation;
24824 under flag_finite_math_only we don't bother. */
24825 if (FLOAT_MODE_P (mode)
24826 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
24827 && !flag_finite_math_only
24828 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
24829 && (code == LE || code == GE
24830 || code == UNEQ || code == LTGT
24831 || code == UNGT || code == UNLT))
24833 enum rtx_code or1, or2;
24834 rtx or1_rtx, or2_rtx, compare2_rtx;
24835 rtx or_result = gen_reg_rtx (CCEQmode);
24837 switch (code)
24839 case LE: or1 = LT; or2 = EQ; break;
24840 case GE: or1 = GT; or2 = EQ; break;
24841 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
24842 case LTGT: or1 = LT; or2 = GT; break;
24843 case UNGT: or1 = UNORDERED; or2 = GT; break;
24844 case UNLT: or1 = UNORDERED; or2 = LT; break;
24845 default: gcc_unreachable ();
24847 validate_condition_mode (or1, comp_mode);
24848 validate_condition_mode (or2, comp_mode);
24849 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
24850 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
24851 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
24852 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
24853 const_true_rtx);
24854 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
24856 compare_result = or_result;
24857 code = EQ;
24860 validate_condition_mode (code, GET_MODE (compare_result));
24862 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
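
/* Sketch of the OR combination above (illustrative instruction sequence,
   assuming CR0 and default register names): a DFmode LE test emits the
   raw compare and then folds the LT and EQ bits into one testable bit,
   roughly:

	fcmpu	0,1,2
	cror	2,0,2		# eq |= lt

   after which the caller branches on a plain EQ of the combined bit.  */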
24866 /* Return the diagnostic message string if the binary operation OP is
24867 not permitted on TYPE1 and TYPE2, NULL otherwise. */
24869 static const char*
24870 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
24871 const_tree type1,
24872 const_tree type2)
24874 enum machine_mode mode1 = TYPE_MODE (type1);
24875 enum machine_mode mode2 = TYPE_MODE (type2);
24877 /* For complex modes, use the inner type. */
24878 if (COMPLEX_MODE_P (mode1))
24879 mode1 = GET_MODE_INNER (mode1);
24881 if (COMPLEX_MODE_P (mode2))
24882 mode2 = GET_MODE_INNER (mode2);
24884 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
24885 double to intermix unless -mfloat128-convert. */
24886 if (mode1 == mode2)
24887 return NULL;
24889 if (!TARGET_FLOAT128_CVT)
24891 if ((mode1 == KFmode && mode2 == IFmode)
24892 || (mode1 == IFmode && mode2 == KFmode))
24893 return N_("__float128 and __ibm128 cannot be used in the same "
24894 "expression");
24896 if (TARGET_IEEEQUAD
24897 && ((mode1 == IFmode && mode2 == TFmode)
24898 || (mode1 == TFmode && mode2 == IFmode)))
24899 return N_("__ibm128 and long double cannot be used in the same "
24900 "expression");
24902 if (!TARGET_IEEEQUAD
24903 && ((mode1 == KFmode && mode2 == TFmode)
24904 || (mode1 == TFmode && mode2 == KFmode)))
24905 return N_("__float128 and long double cannot be used in the same "
24906 "expression");
24909 return NULL;
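
/* User-visible effect (illustrative source fragment, not compiled):
   without -mfloat128-convert the two 128-bit formats may not intermix.  */
#if 0
__float128 f;
__ibm128 g;
/* error: __float128 and __ibm128 cannot be used in the same expression */
__float128 h = f + g;
#endif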
24913 /* Expand floating point conversion to/from __float128 and __ibm128. */
24915 void
24916 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
24918 machine_mode dest_mode = GET_MODE (dest);
24919 machine_mode src_mode = GET_MODE (src);
24920 convert_optab cvt = unknown_optab;
24921 bool do_move = false;
24922 rtx libfunc = NULL_RTX;
24923 rtx dest2;
24924 typedef rtx (*rtx_2func_t) (rtx, rtx);
24925 rtx_2func_t hw_convert = (rtx_2func_t)0;
24926 size_t kf_or_tf;
24928 struct hw_conv_t {
24929 rtx_2func_t from_df;
24930 rtx_2func_t from_sf;
24931 rtx_2func_t from_si_sign;
24932 rtx_2func_t from_si_uns;
24933 rtx_2func_t from_di_sign;
24934 rtx_2func_t from_di_uns;
24935 rtx_2func_t to_df;
24936 rtx_2func_t to_sf;
24937 rtx_2func_t to_si_sign;
24938 rtx_2func_t to_si_uns;
24939 rtx_2func_t to_di_sign;
24940 rtx_2func_t to_di_uns;
24941 } hw_conversions[2] = {
24942 /* conversions to/from KFmode */
24944 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
24945 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
24946 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
24947 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
24948 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
24949 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
24950 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
24951 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
24952 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
24953 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
24954 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
24955 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
24958 /* conversions to/from TFmode */
24960 gen_extenddftf2_hw, /* TFmode <- DFmode. */
24961 gen_extendsftf2_hw, /* TFmode <- SFmode. */
24962 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
24963 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
24964 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
24965 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
24966 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
24967 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
24968 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
24969 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
24970 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
24971 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
24975 if (dest_mode == src_mode)
24976 gcc_unreachable ();
24978 /* Eliminate memory operations. */
24979 if (MEM_P (src))
24980 src = force_reg (src_mode, src);
24982 if (MEM_P (dest))
24984 rtx tmp = gen_reg_rtx (dest_mode);
24985 rs6000_expand_float128_convert (tmp, src, unsigned_p);
24986 rs6000_emit_move (dest, tmp, dest_mode);
24987 return;
24990 /* Convert to IEEE 128-bit floating point. */
24991 if (FLOAT128_IEEE_P (dest_mode))
24993 if (dest_mode == KFmode)
24994 kf_or_tf = 0;
24995 else if (dest_mode == TFmode)
24996 kf_or_tf = 1;
24997 else
24998 gcc_unreachable ();
25000 switch (src_mode)
25002 case DFmode:
25003 cvt = sext_optab;
25004 hw_convert = hw_conversions[kf_or_tf].from_df;
25005 break;
25007 case SFmode:
25008 cvt = sext_optab;
25009 hw_convert = hw_conversions[kf_or_tf].from_sf;
25010 break;
25012 case KFmode:
25013 case IFmode:
25014 case TFmode:
25015 if (FLOAT128_IBM_P (src_mode))
25016 cvt = sext_optab;
25017 else
25018 do_move = true;
25019 break;
25021 case SImode:
25022 if (unsigned_p)
25024 cvt = ufloat_optab;
25025 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
25027 else
25029 cvt = sfloat_optab;
25030 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
25032 break;
25034 case DImode:
25035 if (unsigned_p)
25037 cvt = ufloat_optab;
25038 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
25040 else
25042 cvt = sfloat_optab;
25043 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
25045 break;
25047 default:
25048 gcc_unreachable ();
25052 /* Convert from IEEE 128-bit floating point. */
25053 else if (FLOAT128_IEEE_P (src_mode))
25055 if (src_mode == KFmode)
25056 kf_or_tf = 0;
25057 else if (src_mode == TFmode)
25058 kf_or_tf = 1;
25059 else
25060 gcc_unreachable ();
25062 switch (dest_mode)
25064 case DFmode:
25065 cvt = trunc_optab;
25066 hw_convert = hw_conversions[kf_or_tf].to_df;
25067 break;
25069 case SFmode:
25070 cvt = trunc_optab;
25071 hw_convert = hw_conversions[kf_or_tf].to_sf;
25072 break;
25074 case KFmode:
25075 case IFmode:
25076 case TFmode:
25077 if (FLOAT128_IBM_P (dest_mode))
25078 cvt = trunc_optab;
25079 else
25080 do_move = true;
25081 break;
25083 case SImode:
25084 if (unsigned_p)
25086 cvt = ufix_optab;
25087 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
25089 else
25091 cvt = sfix_optab;
25092 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
25094 break;
25096 case DImode:
25097 if (unsigned_p)
25099 cvt = ufix_optab;
25100 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
25102 else
25104 cvt = sfix_optab;
25105 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
25107 break;
25109 default:
25110 gcc_unreachable ();
25114 /* Both IBM format. */
25115 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
25116 do_move = true;
25118 else
25119 gcc_unreachable ();
25121 /* Handle conversion between TFmode/KFmode. */
25122 if (do_move)
25123 emit_move_insn (dest, gen_lowpart (dest_mode, src));
25125 /* Handle conversion if we have hardware support. */
25126 else if (TARGET_FLOAT128_HW && hw_convert)
25127 emit_insn ((hw_convert) (dest, src));
25129 /* Call an external function to do the conversion. */
25130 else if (cvt != unknown_optab)
25132 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
25133 gcc_assert (libfunc != NULL_RTX);
25135 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
25136 src_mode);
25138 gcc_assert (dest2 != NULL_RTX);
25139 if (!rtx_equal_p (dest, dest2))
25140 emit_move_insn (dest, dest2);
25143 else
25144 gcc_unreachable ();
25146 return;
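
/* For example (illustrative): a signed DImode to KFmode conversion
   without TARGET_FLOAT128_HW selects sfloat_optab, so the value is
   converted by whatever library routine is registered for that optab
   (conventionally __floatdikf), called through
   emit_library_call_value.  */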
25150 /* Emit the RTL for an sISEL pattern. */
25152 void
25153 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
25155 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
25158 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
25159 can be used as that dest register. Return the dest register. */
25161 rtx
25162 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
25164 if (op2 == const0_rtx)
25165 return op1;
25167 if (GET_CODE (scratch) == SCRATCH)
25168 scratch = gen_reg_rtx (mode);
25170 if (logical_operand (op2, mode))
25171 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
25172 else
25173 emit_insn (gen_rtx_SET (scratch,
25174 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
25176 return scratch;
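
/* Illustrative expansion (assuming op1 in r3, scratch in r9, default
   register names): for op2 == 5, a logical_operand, the scratch is set
   with "xori 9,3,5"; for a constant that is not a logical_operand the
   XOR is replaced by an add of the negated constant.  Either way the
   scratch is zero exactly when op1 == op2.  */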
25179 void
25180 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
25182 rtx condition_rtx;
25183 machine_mode op_mode;
25184 enum rtx_code cond_code;
25185 rtx result = operands[0];
25187 condition_rtx = rs6000_generate_compare (operands[1], mode);
25188 cond_code = GET_CODE (condition_rtx);
25190 if (FLOAT_MODE_P (mode)
25191 && !TARGET_FPRS && TARGET_HARD_FLOAT)
25193 rtx t;
25195 PUT_MODE (condition_rtx, SImode);
25196 t = XEXP (condition_rtx, 0);
25198 gcc_assert (cond_code == NE || cond_code == EQ);
25200 if (cond_code == NE)
25201 emit_insn (gen_e500_flip_gt_bit (t, t));
25203 emit_insn (gen_move_from_CR_gt_bit (result, t));
25204 return;
25207 if (cond_code == NE
25208 || cond_code == GE || cond_code == LE
25209 || cond_code == GEU || cond_code == LEU
25210 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
25212 rtx not_result = gen_reg_rtx (CCEQmode);
25213 rtx not_op, rev_cond_rtx;
25214 machine_mode cc_mode;
25216 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
25218 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
25219 SImode, XEXP (condition_rtx, 0), const0_rtx);
25220 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
25221 emit_insn (gen_rtx_SET (not_result, not_op));
25222 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
25225 op_mode = GET_MODE (XEXP (operands[1], 0));
25226 if (op_mode == VOIDmode)
25227 op_mode = GET_MODE (XEXP (operands[1], 1));
25229 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
25231 PUT_MODE (condition_rtx, DImode);
25232 convert_move (result, condition_rtx, 0);
25234 else
25236 PUT_MODE (condition_rtx, SImode);
25237 emit_insn (gen_rtx_SET (result, condition_rtx));
25241 /* Emit a branch of kind CODE to location LOC. */
25243 void
25244 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
25246 rtx condition_rtx, loc_ref;
25248 condition_rtx = rs6000_generate_compare (operands[0], mode);
25249 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
25250 emit_jump_insn (gen_rtx_SET (pc_rtx,
25251 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
25252 loc_ref, pc_rtx)));
25255 /* Return the string to output a conditional branch to LABEL, which is
25256 the operand template of the label, or NULL if the branch is really a
25257 conditional return.
25259 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
25260 condition code register and its mode specifies what kind of
25261 comparison we made.
25263 REVERSED is nonzero if we should reverse the sense of the comparison.
25265 INSN is the insn. */
25267 char *
25268 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
25270 static char string[64];
25271 enum rtx_code code = GET_CODE (op);
25272 rtx cc_reg = XEXP (op, 0);
25273 machine_mode mode = GET_MODE (cc_reg);
25274 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
25275 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
25276 int really_reversed = reversed ^ need_longbranch;
25277 char *s = string;
25278 const char *ccode;
25279 const char *pred;
25280 rtx note;
25282 validate_condition_mode (code, mode);
25284 /* Work out which way this really branches. We could use
25285 reverse_condition_maybe_unordered here always but this
25286 makes the resulting assembler clearer. */
25287 if (really_reversed)
25289 /* Reversal of FP compares takes care -- an ordered compare
25290 becomes an unordered compare and vice versa. */
25291 if (mode == CCFPmode)
25292 code = reverse_condition_maybe_unordered (code);
25293 else
25294 code = reverse_condition (code);
25297 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
25299 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
25300 to the GT bit. */
25301 switch (code)
25303 case EQ:
25304 /* Opposite of GT. */
25305 code = GT;
25306 break;
25308 case NE:
25309 code = UNLE;
25310 break;
25312 default:
25313 gcc_unreachable ();
25317 switch (code)
25319 /* Not all of these are actually distinct opcodes, but
25320 we distinguish them for clarity of the resulting assembler. */
25321 case NE: case LTGT:
25322 ccode = "ne"; break;
25323 case EQ: case UNEQ:
25324 ccode = "eq"; break;
25325 case GE: case GEU:
25326 ccode = "ge"; break;
25327 case GT: case GTU: case UNGT:
25328 ccode = "gt"; break;
25329 case LE: case LEU:
25330 ccode = "le"; break;
25331 case LT: case LTU: case UNLT:
25332 ccode = "lt"; break;
25333 case UNORDERED: ccode = "un"; break;
25334 case ORDERED: ccode = "nu"; break;
25335 case UNGE: ccode = "nl"; break;
25336 case UNLE: ccode = "ng"; break;
25337 default:
25338 gcc_unreachable ();
25341 /* Maybe we have a guess as to how likely the branch is. */
25342 pred = "";
25343 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
25344 if (note != NULL_RTX)
25346 /* PROB is the difference from 50%. */
25347 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
25349 /* Only hint for highly probable/improbable branches on newer cpus when
25350 we have real profile data, as static prediction overrides processor
25351 dynamic prediction. For older cpus we may as well always hint, but
25352 assume not taken for branches that are very close to 50% as a
25353 mispredicted taken branch is more expensive than a
25354 mispredicted not-taken branch. */
25355 if (rs6000_always_hint
25356 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
25357 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
25358 && br_prob_note_reliable_p (note)))
25360 if (abs (prob) > REG_BR_PROB_BASE / 20
25361 && ((prob > 0) ^ need_longbranch))
25362 pred = "+";
25363 else
25364 pred = "-";
25368 if (label == NULL)
25369 s += sprintf (s, "b%slr%s ", ccode, pred);
25370 else
25371 s += sprintf (s, "b%s%s ", ccode, pred);
25373 /* We need to escape any '%' characters in the reg_names string.
25374 Assume they'd only be the first character.... */
25375 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
25376 *s++ = '%';
25377 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
25379 if (label != NULL)
25381 /* If the branch distance was too far, we may have to use an
25382 unconditional branch to go the distance. */
25383 if (need_longbranch)
25384 s += sprintf (s, ",$+8\n\tb %s", label);
25385 else
25386 s += sprintf (s, ",%s", label);
25389 return string;
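/* A minimal illustrative sketch, not from the original sources, of the
   hint arithmetic above with REG_BR_PROB_BASE == 10000: absent
   rs6000_always_hint, a hint is only emitted at all when reliable
   profile data says the branch is within 2% of always/never taken,
   and a long branch flips the sense because the conditional branch
   then jumps around an unconditional one.  */
#include <stdlib.h>
static const char *pick_hint (int note_prob, int need_longbranch)
{
  int prob = note_prob - 10000 / 2;      /* difference from 50% */
  if (abs (prob) <= 10000 / 100 * 48)
    return "";                           /* too close to 50%: no hint */
  return (abs (prob) > 10000 / 20 && ((prob > 0) ^ need_longbranch))
	 ? "+" : "-";
}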
25392 /* Return the string to flip the GT bit on a CR. */
25393 char *
25394 output_e500_flip_gt_bit (rtx dst, rtx src)
25396 static char string[64];
25397 int a, b;
25399 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
25400 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
25402 /* GT bit. */
25403 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
25404 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
25406 sprintf (string, "crnot %d,%d", a, b);
25407 return string;
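/* A minimal illustrative sketch, not from the original sources: each
   CR field CRn occupies four consecutive condition-register bits in
   the order LT, GT, EQ, SO, so the "4 * field + 1" above selects the
   GT bit of the field.  */
enum { CR_LT, CR_GT, CR_EQ, CR_SO };
static int cr_bit (int cr_field, int which) { return 4 * cr_field + which; }
/* e.g. cr_bit (6, CR_GT) == 25, the GT bit of CR6 as a crnot operand.  */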
25410 /* Return insn for VSX or Altivec comparisons. */
25412 static rtx
25413 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
25415 rtx mask;
25416 machine_mode mode = GET_MODE (op0);
25418 switch (code)
25420 default:
25421 break;
25423 case GE:
25424 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
25425 return NULL_RTX;
25426 /* FALLTHRU */
25428 case EQ:
25429 case GT:
25430 case GTU:
25431 case ORDERED:
25432 case UNORDERED:
25433 case UNEQ:
25434 case LTGT:
25435 mask = gen_reg_rtx (mode);
25436 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
25437 return mask;
25440 return NULL_RTX;
25443 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
25444 DMODE is expected destination mode. This is a recursive function. */
25446 static rtx
25447 rs6000_emit_vector_compare (enum rtx_code rcode,
25448 rtx op0, rtx op1,
25449 machine_mode dmode)
25451 rtx mask;
25452 bool swap_operands = false;
25453 bool try_again = false;
25455 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
25456 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
25458 /* See if the comparison works as is. */
25459 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
25460 if (mask)
25461 return mask;
25463 switch (rcode)
25465 case LT:
25466 rcode = GT;
25467 swap_operands = true;
25468 try_again = true;
25469 break;
25470 case LTU:
25471 rcode = GTU;
25472 swap_operands = true;
25473 try_again = true;
25474 break;
25475 case NE:
25476 case UNLE:
25477 case UNLT:
25478 case UNGE:
25479 case UNGT:
25480 /* Invert condition and try again.
25481 e.g., A != B becomes ~(A==B). */
25483 enum rtx_code rev_code;
25484 enum insn_code nor_code;
25485 rtx mask2;
25487 rev_code = reverse_condition_maybe_unordered (rcode);
25488 if (rev_code == UNKNOWN)
25489 return NULL_RTX;
25491 nor_code = optab_handler (one_cmpl_optab, dmode);
25492 if (nor_code == CODE_FOR_nothing)
25493 return NULL_RTX;
25495 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
25496 if (!mask2)
25497 return NULL_RTX;
25499 mask = gen_reg_rtx (dmode);
25500 emit_insn (GEN_FCN (nor_code) (mask, mask2));
25501 return mask;
25503 break;
25504 case GE:
25505 case GEU:
25506 case LE:
25507 case LEU:
25508 /* Try GT/GTU/LT/LTU OR EQ */
25510 rtx c_rtx, eq_rtx;
25511 enum insn_code ior_code;
25512 enum rtx_code new_code;
25514 switch (rcode)
25516 case GE:
25517 new_code = GT;
25518 break;
25520 case GEU:
25521 new_code = GTU;
25522 break;
25524 case LE:
25525 new_code = LT;
25526 break;
25528 case LEU:
25529 new_code = LTU;
25530 break;
25532 default:
25533 gcc_unreachable ();
25536 ior_code = optab_handler (ior_optab, dmode);
25537 if (ior_code == CODE_FOR_nothing)
25538 return NULL_RTX;
25540 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
25541 if (!c_rtx)
25542 return NULL_RTX;
25544 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
25545 if (!eq_rtx)
25546 return NULL_RTX;
25548 mask = gen_reg_rtx (dmode);
25549 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
25550 return mask;
25552 break;
25553 default:
25554 return NULL_RTX;
25557 if (try_again)
25559 if (swap_operands)
25560 std::swap (op0, op1);
25562 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
25563 if (mask)
25564 return mask;
25567 /* You only get two chances. */
25568 return NULL_RTX;
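/* A minimal illustrative sketch, not from the original sources, of the
   identities the recursion above uses to synthesize comparisons the
   hardware lacks, modeled on a single lane:
     a <  b  ==  b > a                 (swap operands, retry)
     a != b  ==  ~(a == b)             (one_cmpl of the EQ mask)
     a >= b  ==  (a > b) | (a == b)    (IOR of two masks)  */
static unsigned lane_ne (int a, int b)
{ return ~(a == b ? ~0u : 0u); }
static unsigned lane_ge (int a, int b)
{ return (a > b ? ~0u : 0u) | (a == b ? ~0u : 0u); }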
25571 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
25572 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
25573 operands for the relation operation COND. */
25575 int
25576 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
25577 rtx cond, rtx cc_op0, rtx cc_op1)
25579 machine_mode dest_mode = GET_MODE (dest);
25580 machine_mode mask_mode = GET_MODE (cc_op0);
25581 enum rtx_code rcode = GET_CODE (cond);
25582 machine_mode cc_mode = CCmode;
25583 rtx mask;
25584 rtx cond2;
25585 bool invert_move = false;
25587 if (VECTOR_UNIT_NONE_P (dest_mode))
25588 return 0;
25590 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
25591 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
25593 switch (rcode)
25595 /* Swap operands if we can, and fall back to doing the operation as
25596 specified, and doing a NOR to invert the test. */
25597 case NE:
25598 case UNLE:
25599 case UNLT:
25600 case UNGE:
25601 case UNGT:
25602 /* Invert condition and try again.
25603 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
25604 invert_move = true;
25605 rcode = reverse_condition_maybe_unordered (rcode);
25606 if (rcode == UNKNOWN)
25607 return 0;
25608 break;
25610 case GE:
25611 case LE:
25612 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
25614 /* Invert condition to avoid compound test. */
25615 invert_move = true;
25616 rcode = reverse_condition (rcode);
25618 break;
25620 case GTU:
25621 case GEU:
25622 case LTU:
25623 case LEU:
25624 /* Mark unsigned tests with CCUNSmode. */
25625 cc_mode = CCUNSmode;
25627 /* Invert condition to avoid compound test if necessary. */
25628 if (rcode == GEU || rcode == LEU)
25630 invert_move = true;
25631 rcode = reverse_condition (rcode);
25633 break;
25635 default:
25636 break;
25639 /* Get the vector mask for the given relational operations. */
25640 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
25642 if (!mask)
25643 return 0;
25645 if (invert_move)
25646 std::swap (op_true, op_false);
25648 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
25649 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
25650 && (GET_CODE (op_true) == CONST_VECTOR
25651 || GET_CODE (op_false) == CONST_VECTOR))
25653 rtx constant_0 = CONST0_RTX (dest_mode);
25654 rtx constant_m1 = CONSTM1_RTX (dest_mode);
25656 if (op_true == constant_m1 && op_false == constant_0)
25658 emit_move_insn (dest, mask);
25659 return 1;
25662 else if (op_true == constant_0 && op_false == constant_m1)
25664 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
25665 return 1;
25668 /* If we can't use the vector comparison directly, perhaps we can use
25669 the mask for the true or false fields, instead of loading up a
25670 constant. */
25671 if (op_true == constant_m1)
25672 op_true = mask;
25674 if (op_false == constant_0)
25675 op_false = mask;
25678 if (!REG_P (op_true) && !SUBREG_P (op_true))
25679 op_true = force_reg (dest_mode, op_true);
25681 if (!REG_P (op_false) && !SUBREG_P (op_false))
25682 op_false = force_reg (dest_mode, op_false);
25684 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
25685 CONST0_RTX (dest_mode));
25686 emit_insn (gen_rtx_SET (dest,
25687 gen_rtx_IF_THEN_ELSE (dest_mode,
25688 cond2,
25689 op_true,
25690 op_false)));
25691 return 1;
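/* A minimal illustrative sketch, not from the original sources: with
   MASK all-ones or all-zeros per lane, the IF_THEN_ELSE emitted above
   is a plain bitwise select, which is also why the -1/0 special cases
   can reuse MASK (or its complement) without loading a constant.  */
static unsigned vsel_lane (unsigned mask, unsigned op_true, unsigned op_false)
{ return (mask & op_true) | (~mask & op_false); }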
25694 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
25695 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
25696 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
25697 hardware has no such operation. */
25699 static int
25700 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25702 enum rtx_code code = GET_CODE (op);
25703 rtx op0 = XEXP (op, 0);
25704 rtx op1 = XEXP (op, 1);
25705 machine_mode compare_mode = GET_MODE (op0);
25706 machine_mode result_mode = GET_MODE (dest);
25707 bool max_p = false;
25709 if (result_mode != compare_mode)
25710 return 0;
25712 if (code == GE || code == GT)
25713 max_p = true;
25714 else if (code == LE || code == LT)
25715 max_p = false;
25716 else
25717 return 0;
25719 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
25720 ;
25722 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
25723 max_p = !max_p;
25725 else
25726 return 0;
25728 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
25729 return 1;
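/* A minimal illustrative sketch, not from the original sources: the
   shapes recognized above are the plain C min/max idioms below; per
   the function comment they map to XSMAXCDP/XSMINCDP on power9,
   matching the ?: semantics of picking the second arm when the
   compare is false or unordered.  */
static double max_like (double a, double b) { return (a >= b) ? a : b; }
static double min_like (double a, double b) { return (a <= b) ? a : b; }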
25732 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
25733 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
25734 operands of the last comparison is nonzero/true, FALSE_COND if it is
25735 zero/false. Return 0 if the hardware has no such operation. */
25737 static int
25738 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25740 enum rtx_code code = GET_CODE (op);
25741 rtx op0 = XEXP (op, 0);
25742 rtx op1 = XEXP (op, 1);
25743 machine_mode result_mode = GET_MODE (dest);
25744 rtx compare_rtx;
25745 rtx cmove_rtx;
25746 rtx clobber_rtx;
25748 if (!can_create_pseudo_p ())
25749 return 0;
25751 switch (code)
25753 case EQ:
25754 case GE:
25755 case GT:
25756 break;
25758 case NE:
25759 case LT:
25760 case LE:
25761 code = swap_condition (code);
25762 std::swap (op0, op1);
25763 break;
25765 default:
25766 return 0;
25769 /* Generate: [(parallel [(set (dest)
25770 (if_then_else (op (cmp1) (cmp2))
25771 (true)
25772 (false)))
25773 (clobber (scratch))])]. */
25775 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
25776 cmove_rtx = gen_rtx_SET (dest,
25777 gen_rtx_IF_THEN_ELSE (result_mode,
25778 compare_rtx,
25779 true_cond,
25780 false_cond));
25782 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
25783 emit_insn (gen_rtx_PARALLEL (VOIDmode,
25784 gen_rtvec (2, cmove_rtx, clobber_rtx)));
25786 return 1;
25789 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
25790 operands of the last comparison is nonzero/true, FALSE_COND if it
25791 is zero/false. Return 0 if the hardware has no such operation. */
25793 int
25794 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25796 enum rtx_code code = GET_CODE (op);
25797 rtx op0 = XEXP (op, 0);
25798 rtx op1 = XEXP (op, 1);
25799 machine_mode compare_mode = GET_MODE (op0);
25800 machine_mode result_mode = GET_MODE (dest);
25801 rtx temp;
25802 bool is_against_zero;
25804 /* These modes should always match. */
25805 if (GET_MODE (op1) != compare_mode
25806 /* In the isel case however, we can use a compare immediate, so
25807 op1 may be a small constant. */
25808 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
25809 return 0;
25810 if (GET_MODE (true_cond) != result_mode)
25811 return 0;
25812 if (GET_MODE (false_cond) != result_mode)
25813 return 0;
25815 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
25816 if (TARGET_P9_MINMAX
25817 && (compare_mode == SFmode || compare_mode == DFmode)
25818 && (result_mode == SFmode || result_mode == DFmode))
25820 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
25821 return 1;
25823 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
25824 return 1;
25827 /* Don't allow using floating point comparisons for integer results for
25828 now. */
25829 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
25830 return 0;
25832 /* First, work out if the hardware can do this at all, or
25833 if it's too slow.... */
25834 if (!FLOAT_MODE_P (compare_mode))
25836 if (TARGET_ISEL)
25837 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
25838 return 0;
25840 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
25841 && SCALAR_FLOAT_MODE_P (compare_mode))
25842 return 0;
25844 is_against_zero = op1 == CONST0_RTX (compare_mode);
25846 /* A floating-point subtract might overflow, underflow, or produce
25847 an inexact result, thus changing the floating-point flags, so it
25848 can't be generated if we care about that. It's safe if one side
25849 of the construct is zero, since then no subtract will be
25850 generated. */
25851 if (SCALAR_FLOAT_MODE_P (compare_mode)
25852 && flag_trapping_math && ! is_against_zero)
25853 return 0;
25855 /* Eliminate half of the comparisons by switching operands, this
25856 makes the remaining code simpler. */
25857 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
25858 || code == LTGT || code == LT || code == UNLE)
25860 code = reverse_condition_maybe_unordered (code);
25861 temp = true_cond;
25862 true_cond = false_cond;
25863 false_cond = temp;
25866 /* UNEQ and LTGT take four instructions for a comparison with zero,
25867 it'll probably be faster to use a branch here too. */
25868 if (code == UNEQ && HONOR_NANS (compare_mode))
25869 return 0;
25871 /* We're going to try to implement comparisons by performing
25872 a subtract, then comparing against zero. Unfortunately,
25873 Inf - Inf is NaN which is not zero, and so if we don't
25874 know that the operand is finite and the comparison
25875 would treat EQ different to UNORDERED, we can't do it. */
25876 if (HONOR_INFINITIES (compare_mode)
25877 && code != GT && code != UNGE
25878 && (GET_CODE (op1) != CONST_DOUBLE
25879 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
25880 /* Constructs of the form (a OP b ? a : b) are safe. */
25881 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
25882 || (! rtx_equal_p (op0, true_cond)
25883 && ! rtx_equal_p (op1, true_cond))))
25884 return 0;
25886 /* At this point we know we can use fsel. */
25888 /* Reduce the comparison to a comparison against zero. */
25889 if (! is_against_zero)
25891 temp = gen_reg_rtx (compare_mode);
25892 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
25893 op0 = temp;
25894 op1 = CONST0_RTX (compare_mode);
25897 /* If we don't care about NaNs we can reduce some of the comparisons
25898 down to faster ones. */
25899 if (! HONOR_NANS (compare_mode))
25900 switch (code)
25902 case GT:
25903 code = LE;
25904 temp = true_cond;
25905 true_cond = false_cond;
25906 false_cond = temp;
25907 break;
25908 case UNGE:
25909 code = GE;
25910 break;
25911 case UNEQ:
25912 code = EQ;
25913 break;
25914 default:
25915 break;
25918 /* Now, reduce everything down to a GE. */
25919 switch (code)
25921 case GE:
25922 break;
25924 case LE:
25925 temp = gen_reg_rtx (compare_mode);
25926 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25927 op0 = temp;
25928 break;
25930 case ORDERED:
25931 temp = gen_reg_rtx (compare_mode);
25932 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
25933 op0 = temp;
25934 break;
25936 case EQ:
25937 temp = gen_reg_rtx (compare_mode);
25938 emit_insn (gen_rtx_SET (temp,
25939 gen_rtx_NEG (compare_mode,
25940 gen_rtx_ABS (compare_mode, op0))));
25941 op0 = temp;
25942 break;
25944 case UNGE:
25945 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
25946 temp = gen_reg_rtx (result_mode);
25947 emit_insn (gen_rtx_SET (temp,
25948 gen_rtx_IF_THEN_ELSE (result_mode,
25949 gen_rtx_GE (VOIDmode,
25950 op0, op1),
25951 true_cond, false_cond)));
25952 false_cond = true_cond;
25953 true_cond = temp;
25955 temp = gen_reg_rtx (compare_mode);
25956 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25957 op0 = temp;
25958 break;
25960 case GT:
25961 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
25962 temp = gen_reg_rtx (result_mode);
25963 emit_insn (gen_rtx_SET (temp,
25964 gen_rtx_IF_THEN_ELSE (result_mode,
25965 gen_rtx_GE (VOIDmode,
25966 op0, op1),
25967 true_cond, false_cond)));
25968 true_cond = false_cond;
25969 false_cond = temp;
25971 temp = gen_reg_rtx (compare_mode);
25972 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25973 op0 = temp;
25974 break;
25976 default:
25977 gcc_unreachable ();
25980 emit_insn (gen_rtx_SET (dest,
25981 gen_rtx_IF_THEN_ELSE (result_mode,
25982 gen_rtx_GE (VOIDmode,
25983 op0, op1),
25984 true_cond, false_cond)));
25985 return 1;
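/* A minimal illustrative sketch, not from the original sources: fsel
   computes dest = (op0 >= 0.0) ? true_cond : false_cond, so the final
   switch rewrites every remaining test as GE-against-zero, e.g.
     a <= 0   ->  -a       >= 0
     ORDERED  ->   fabs(a) >= 0
     a == 0   ->  -fabs(a) >= 0   (false for NaN, as required).
   A scalar model of the EQ case:  */
#include <math.h>
static double sel_eq0 (double a, double t, double f)
{ return (-fabs (a) >= 0.0) ? t : f; }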
25988 /* Same as above, but for ints (isel). */
25990 static int
25991 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25993 rtx condition_rtx, cr;
25994 machine_mode mode = GET_MODE (dest);
25995 enum rtx_code cond_code;
25996 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
25997 bool signedp;
25999 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
26000 return 0;
26002 /* We still have to do the compare, because isel doesn't do a
26003 compare, it just looks at the CRx bits set by a previous compare
26004 instruction. */
26005 condition_rtx = rs6000_generate_compare (op, mode);
26006 cond_code = GET_CODE (condition_rtx);
26007 cr = XEXP (condition_rtx, 0);
26008 signedp = GET_MODE (cr) == CCmode;
26010 isel_func = (mode == SImode
26011 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
26012 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
26014 switch (cond_code)
26016 case LT: case GT: case LTU: case GTU: case EQ:
26017 /* isel handles these directly. */
26018 break;
26020 default:
26021 /* We need to swap the sense of the comparison. */
26023 std::swap (false_cond, true_cond);
26024 PUT_CODE (condition_rtx, reverse_condition (cond_code));
26026 break;
26029 false_cond = force_reg (mode, false_cond);
26030 if (true_cond != const0_rtx)
26031 true_cond = force_reg (mode, true_cond);
26033 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
26035 return 1;
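/* A minimal illustrative sketch, not from the original sources: isel
   performs no comparison itself; once cmpw/cmpld has set a CR bit,
   "isel rD,rA,rB,BC" is simply rD = CR[BC] ? rA : rB.  That is why
   only LT/GT/LTU/GTU/EQ are accepted directly and the remaining codes
   are reversed with the arms swapped above.  */
static long isel_like (int crbit, long a, long b) { return crbit ? a : b; }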
26038 const char *
26039 output_isel (rtx *operands)
26041 enum rtx_code code;
26043 code = GET_CODE (operands[1]);
26045 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
26047 gcc_assert (GET_CODE (operands[2]) == REG
26048 && GET_CODE (operands[3]) == REG);
26049 PUT_CODE (operands[1], reverse_condition (code));
26050 return "isel %0,%3,%2,%j1";
26053 return "isel %0,%2,%3,%j1";
26056 void
26057 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
26059 machine_mode mode = GET_MODE (op0);
26060 enum rtx_code c;
26061 rtx target;
26063 /* VSX/altivec have direct min/max insns. */
26064 if ((code == SMAX || code == SMIN)
26065 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
26066 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
26068 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
26069 return;
26072 if (code == SMAX || code == SMIN)
26073 c = GE;
26074 else
26075 c = GEU;
26077 if (code == SMAX || code == UMAX)
26078 target = emit_conditional_move (dest, c, op0, op1, mode,
26079 op0, op1, mode, 0);
26080 else
26081 target = emit_conditional_move (dest, c, op0, op1, mode,
26082 op1, op0, mode, 0);
26083 gcc_assert (target);
26084 if (target != dest)
26085 emit_move_insn (dest, target);
26088 /* Split a signbit operation on 64-bit machines with direct move. Also allow
26089 for the value to come from memory or if it is already loaded into a GPR. */
26091 void
26092 rs6000_split_signbit (rtx dest, rtx src)
26094 machine_mode d_mode = GET_MODE (dest);
26095 machine_mode s_mode = GET_MODE (src);
26096 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
26097 rtx shift_reg = dest_di;
26099 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
26101 if (MEM_P (src))
26103 rtx mem = (WORDS_BIG_ENDIAN
26104 ? adjust_address (src, DImode, 0)
26105 : adjust_address (src, DImode, 8));
26106 emit_insn (gen_rtx_SET (dest_di, mem));
26109 else
26111 unsigned int r = reg_or_subregno (src);
26113 if (INT_REGNO_P (r))
26114 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
26116 else
26118 /* Generate the special mfvsrd instruction to get it in a GPR. */
26119 gcc_assert (VSX_REGNO_P (r));
26120 if (s_mode == KFmode)
26121 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
26122 else
26123 emit_insn (gen_signbittf2_dm2 (dest_di, src));
26127 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
26128 return;
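/* A minimal illustrative sketch, not from the original sources: the
   split implements signbit() for IEEE 128-bit values as "load the
   doubleword holding the sign, shift right 63".  The sign sits in the
   first doubleword on big-endian and the second on little-endian,
   matching the offset-0/offset-8 loads above.  Little-endian model:  */
#include <stdint.h>
#include <string.h>
static int signbit128_le (const void *p)
{
  uint64_t hi;
  memcpy (&hi, (const char *) p + 8, sizeof hi);
  return (int) (hi >> 63);
}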
26131 /* A subroutine of the atomic operation splitters. Jump to LABEL if
26132 COND is true. Mark the jump as unlikely to be taken. */
26134 static void
26135 emit_unlikely_jump (rtx cond, rtx label)
26137 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
26138 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
26139 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
26140 add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
26143 /* A subroutine of the atomic operation splitters. Emit a load-locked
26144 instruction in MODE. For QI/HImode, possibly use a pattern that includes
26145 the zero_extend operation. */
26147 static void
26148 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
26150 rtx (*fn) (rtx, rtx) = NULL;
26152 switch (mode)
26154 case QImode:
26155 fn = gen_load_lockedqi;
26156 break;
26157 case HImode:
26158 fn = gen_load_lockedhi;
26159 break;
26160 case SImode:
26161 if (GET_MODE (mem) == QImode)
26162 fn = gen_load_lockedqi_si;
26163 else if (GET_MODE (mem) == HImode)
26164 fn = gen_load_lockedhi_si;
26165 else
26166 fn = gen_load_lockedsi;
26167 break;
26168 case DImode:
26169 fn = gen_load_lockeddi;
26170 break;
26171 case TImode:
26172 fn = gen_load_lockedti;
26173 break;
26174 default:
26175 gcc_unreachable ();
26177 emit_insn (fn (reg, mem));
26180 /* A subroutine of the atomic operation splitters. Emit a store-conditional
26181 instruction in MODE. */
26183 static void
26184 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
26186 rtx (*fn) (rtx, rtx, rtx) = NULL;
26188 switch (mode)
26190 case QImode:
26191 fn = gen_store_conditionalqi;
26192 break;
26193 case HImode:
26194 fn = gen_store_conditionalhi;
26195 break;
26196 case SImode:
26197 fn = gen_store_conditionalsi;
26198 break;
26199 case DImode:
26200 fn = gen_store_conditionaldi;
26201 break;
26202 case TImode:
26203 fn = gen_store_conditionalti;
26204 break;
26205 default:
26206 gcc_unreachable ();
26209 /* Emit sync before stwcx. to address PPC405 Erratum. */
26210 if (PPC405_ERRATUM77)
26211 emit_insn (gen_hwsync ());
26213 emit_insn (fn (res, mem, val));
26216 /* Expand barriers before and after a load_locked/store_cond sequence. */
26218 static rtx
26219 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
26221 rtx addr = XEXP (mem, 0);
26222 int strict_p = (reload_in_progress || reload_completed);
26224 if (!legitimate_indirect_address_p (addr, strict_p)
26225 && !legitimate_indexed_address_p (addr, strict_p))
26227 addr = force_reg (Pmode, addr);
26228 mem = replace_equiv_address_nv (mem, addr);
26231 switch (model)
26233 case MEMMODEL_RELAXED:
26234 case MEMMODEL_CONSUME:
26235 case MEMMODEL_ACQUIRE:
26236 break;
26237 case MEMMODEL_RELEASE:
26238 case MEMMODEL_ACQ_REL:
26239 emit_insn (gen_lwsync ());
26240 break;
26241 case MEMMODEL_SEQ_CST:
26242 emit_insn (gen_hwsync ());
26243 break;
26244 default:
26245 gcc_unreachable ();
26247 return mem;
26250 static void
26251 rs6000_post_atomic_barrier (enum memmodel model)
26253 switch (model)
26255 case MEMMODEL_RELAXED:
26256 case MEMMODEL_CONSUME:
26257 case MEMMODEL_RELEASE:
26258 break;
26259 case MEMMODEL_ACQUIRE:
26260 case MEMMODEL_ACQ_REL:
26261 case MEMMODEL_SEQ_CST:
26262 emit_insn (gen_isync ());
26263 break;
26264 default:
26265 gcc_unreachable ();
26269 /* A subroutine of the various atomic expanders. For sub-word operations,
26270 we must adjust things to operate on SImode. Given the original MEM,
26271 return a new aligned memory. Also build and return the quantities by
26272 which to shift and mask. */
26274 static rtx
26275 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
26277 rtx addr, align, shift, mask, mem;
26278 HOST_WIDE_INT shift_mask;
26279 machine_mode mode = GET_MODE (orig_mem);
26281 /* For smaller modes, we have to implement this via SImode. */
26282 shift_mask = (mode == QImode ? 0x18 : 0x10);
26284 addr = XEXP (orig_mem, 0);
26285 addr = force_reg (GET_MODE (addr), addr);
26287 /* Aligned memory containing subword. Generate a new memory. We
26288 do not want any of the existing MEM_ATTR data, as we're now
26289 accessing memory outside the original object. */
26290 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
26291 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26292 mem = gen_rtx_MEM (SImode, align);
26293 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
26294 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
26295 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
26297 /* Shift amount for subword relative to aligned word. */
26298 shift = gen_reg_rtx (SImode);
26299 addr = gen_lowpart (SImode, addr);
26300 rtx tmp = gen_reg_rtx (SImode);
26301 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
26302 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
26303 if (BYTES_BIG_ENDIAN)
26304 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
26305 shift, 1, OPTAB_LIB_WIDEN);
26306 *pshift = shift;
26308 /* Mask for insertion. */
26309 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
26310 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
26311 *pmask = mask;
26313 return mem;
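/* A minimal illustrative sketch, not from the original sources, of the
   address arithmetic above for QImode: the containing word is A & -4,
   the bit shift is (A & 3) * 8 (XORed with 0x18 on big-endian, where
   byte 0 is most significant), and the insertion mask is the mode
   mask shifted into place.  Little-endian model:  */
#include <stdint.h>
static void subword_qi (uintptr_t a, uintptr_t *word,
			int *shift, uint32_t *mask)
{
  *word  = a & ~(uintptr_t) 3;
  *shift = (int) (a & 3) * 8;
  *mask  = (uint32_t) 0xff << *shift;
}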
26316 /* A subroutine of the various atomic expanders. For sub-word operands,
26317 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
26319 static rtx
26320 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
26322 rtx x;
26324 x = gen_reg_rtx (SImode);
26325 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
26326 gen_rtx_NOT (SImode, mask),
26327 oldval)));
26329 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
26331 return x;
26334 /* A subroutine of the various atomic expanders. For sub-word operands,
26335 extract WIDE to NARROW via SHIFT. */
26337 static void
26338 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
26340 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
26341 wide, 1, OPTAB_LIB_WIDEN);
26342 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
26345 /* Expand an atomic compare and swap operation. */
26347 void
26348 rs6000_expand_atomic_compare_and_swap (rtx operands[])
26350 rtx boolval, retval, mem, oldval, newval, cond;
26351 rtx label1, label2, x, mask, shift;
26352 machine_mode mode, orig_mode;
26353 enum memmodel mod_s, mod_f;
26354 bool is_weak;
26356 boolval = operands[0];
26357 retval = operands[1];
26358 mem = operands[2];
26359 oldval = operands[3];
26360 newval = operands[4];
26361 is_weak = (INTVAL (operands[5]) != 0);
26362 mod_s = memmodel_base (INTVAL (operands[6]));
26363 mod_f = memmodel_base (INTVAL (operands[7]));
26364 orig_mode = mode = GET_MODE (mem);
26366 mask = shift = NULL_RTX;
26367 if (mode == QImode || mode == HImode)
26369 /* Before power8, we didn't have access to lbarx/lharx, so generate a
26370 lwarx and shift/mask operations. With power8, we need to do the
26371 comparison in SImode, but the store is still done in QI/HImode. */
26372 oldval = convert_modes (SImode, mode, oldval, 1);
26374 if (!TARGET_SYNC_HI_QI)
26376 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26378 /* Shift and mask OLDVAL into position within the word. */
26379 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
26380 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26382 /* Shift and mask NEWVAL into position within the word. */
26383 newval = convert_modes (SImode, mode, newval, 1);
26384 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
26385 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26388 /* Prepare to adjust the return value. */
26389 retval = gen_reg_rtx (SImode);
26390 mode = SImode;
26392 else if (reg_overlap_mentioned_p (retval, oldval))
26393 oldval = copy_to_reg (oldval);
26395 if (mode != TImode && !reg_or_short_operand (oldval, mode))
26396 oldval = copy_to_mode_reg (mode, oldval);
26398 if (reg_overlap_mentioned_p (retval, newval))
26399 newval = copy_to_reg (newval);
26401 mem = rs6000_pre_atomic_barrier (mem, mod_s);
26403 label1 = NULL_RTX;
26404 if (!is_weak)
26406 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26407 emit_label (XEXP (label1, 0));
26409 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26411 emit_load_locked (mode, retval, mem);
26413 x = retval;
26414 if (mask)
26415 x = expand_simple_binop (SImode, AND, retval, mask,
26416 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26418 cond = gen_reg_rtx (CCmode);
26419 /* If we have TImode, synthesize a comparison. */
26420 if (mode != TImode)
26421 x = gen_rtx_COMPARE (CCmode, x, oldval);
26422 else
26424 rtx xor1_result = gen_reg_rtx (DImode);
26425 rtx xor2_result = gen_reg_rtx (DImode);
26426 rtx or_result = gen_reg_rtx (DImode);
26427 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
26428 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
26429 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
26430 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
26432 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
26433 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
26434 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
26435 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
26438 emit_insn (gen_rtx_SET (cond, x));
26440 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26441 emit_unlikely_jump (x, label2);
26443 x = newval;
26444 if (mask)
26445 x = rs6000_mask_atomic_subword (retval, newval, mask);
26447 emit_store_conditional (orig_mode, cond, mem, x);
26449 if (!is_weak)
26451 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26452 emit_unlikely_jump (x, label1);
26455 if (!is_mm_relaxed (mod_f))
26456 emit_label (XEXP (label2, 0));
26458 rs6000_post_atomic_barrier (mod_s);
26460 if (is_mm_relaxed (mod_f))
26461 emit_label (XEXP (label2, 0));
26463 if (shift)
26464 rs6000_finish_atomic_subword (operands[1], retval, shift);
26465 else if (mode != GET_MODE (operands[1]))
26466 convert_move (operands[1], retval, 1);
26468 /* In all cases, CR0 contains EQ on success, and NE on failure. */
26469 x = gen_rtx_EQ (SImode, cond, const0_rtx);
26470 emit_insn (gen_rtx_SET (boolval, x));
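/* A minimal illustrative sketch, not from the original sources: this
   expander is what services the __atomic_compare_exchange family, so
   the strong byte CAS below becomes -- pre-power8 -- a hwsync;
   lwarx/compare/stwcx./isync loop over the aligned containing word,
   using the subword shift/mask helpers above.  */
#include <stdbool.h>
static bool cas_byte (unsigned char *p, unsigned char *expected,
		      unsigned char desired)
{
  return __atomic_compare_exchange_n (p, expected, desired,
				      false /* strong */,
				      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}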
26473 /* Expand an atomic exchange operation. */
26475 void
26476 rs6000_expand_atomic_exchange (rtx operands[])
26478 rtx retval, mem, val, cond;
26479 machine_mode mode;
26480 enum memmodel model;
26481 rtx label, x, mask, shift;
26483 retval = operands[0];
26484 mem = operands[1];
26485 val = operands[2];
26486 model = memmodel_base (INTVAL (operands[3]));
26487 mode = GET_MODE (mem);
26489 mask = shift = NULL_RTX;
26490 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
26492 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26494 /* Shift and mask VAL into position within the word. */
26495 val = convert_modes (SImode, mode, val, 1);
26496 val = expand_simple_binop (SImode, ASHIFT, val, shift,
26497 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26499 /* Prepare to adjust the return value. */
26500 retval = gen_reg_rtx (SImode);
26501 mode = SImode;
26504 mem = rs6000_pre_atomic_barrier (mem, model);
26506 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26507 emit_label (XEXP (label, 0));
26509 emit_load_locked (mode, retval, mem);
26511 x = val;
26512 if (mask)
26513 x = rs6000_mask_atomic_subword (retval, val, mask);
26515 cond = gen_reg_rtx (CCmode);
26516 emit_store_conditional (mode, cond, mem, x);
26518 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26519 emit_unlikely_jump (x, label);
26521 rs6000_post_atomic_barrier (model);
26523 if (shift)
26524 rs6000_finish_atomic_subword (operands[0], retval, shift);
26527 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
26528 to perform. MEM is the memory on which to operate. VAL is the second
26529 operand of the binary operator. BEFORE and AFTER are optional locations to
26530 return the value of MEM either before or after the operation. MODEL_RTX
26531 is a CONST_INT containing the memory model to use. */
26533 void
26534 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
26535 rtx orig_before, rtx orig_after, rtx model_rtx)
26537 enum memmodel model = memmodel_base (INTVAL (model_rtx));
26538 machine_mode mode = GET_MODE (mem);
26539 machine_mode store_mode = mode;
26540 rtx label, x, cond, mask, shift;
26541 rtx before = orig_before, after = orig_after;
26543 mask = shift = NULL_RTX;
26544 /* On power8, we want to use SImode for the operation. On previous systems,
26545 use the operation in a subword and shift/mask to get the proper byte or
26546 halfword. */
26547 if (mode == QImode || mode == HImode)
26549 if (TARGET_SYNC_HI_QI)
26551 val = convert_modes (SImode, mode, val, 1);
26553 /* Prepare to adjust the return value. */
26554 before = gen_reg_rtx (SImode);
26555 if (after)
26556 after = gen_reg_rtx (SImode);
26557 mode = SImode;
26559 else
26561 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26563 /* Shift and mask VAL into position within the word. */
26564 val = convert_modes (SImode, mode, val, 1);
26565 val = expand_simple_binop (SImode, ASHIFT, val, shift,
26566 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26568 switch (code)
26570 case IOR:
26571 case XOR:
26572 /* We've already zero-extended VAL. That is sufficient to
26573 make certain that it does not affect other bits. */
26574 mask = NULL;
26575 break;
26577 case AND:
26578 /* If we make certain that all of the other bits in VAL are
26579 set, that will be sufficient to not affect other bits. */
26580 x = gen_rtx_NOT (SImode, mask);
26581 x = gen_rtx_IOR (SImode, x, val);
26582 emit_insn (gen_rtx_SET (val, x));
26583 mask = NULL;
26584 break;
26586 case NOT:
26587 case PLUS:
26588 case MINUS:
26589 /* These will all affect bits outside the field and need
26590 adjustment via MASK within the loop. */
26591 break;
26593 default:
26594 gcc_unreachable ();
26597 /* Prepare to adjust the return value. */
26598 before = gen_reg_rtx (SImode);
26599 if (after)
26600 after = gen_reg_rtx (SImode);
26601 store_mode = mode = SImode;
26605 mem = rs6000_pre_atomic_barrier (mem, model);
26607 label = gen_label_rtx ();
26608 emit_label (label);
26609 label = gen_rtx_LABEL_REF (VOIDmode, label);
26611 if (before == NULL_RTX)
26612 before = gen_reg_rtx (mode);
26614 emit_load_locked (mode, before, mem);
26616 if (code == NOT)
26618 x = expand_simple_binop (mode, AND, before, val,
26619 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26620 after = expand_simple_unop (mode, NOT, x, after, 1);
26622 else
26624 after = expand_simple_binop (mode, code, before, val,
26625 after, 1, OPTAB_LIB_WIDEN);
26628 x = after;
26629 if (mask)
26631 x = expand_simple_binop (SImode, AND, after, mask,
26632 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26633 x = rs6000_mask_atomic_subword (before, x, mask);
26635 else if (store_mode != mode)
26636 x = convert_modes (store_mode, mode, x, 1);
26638 cond = gen_reg_rtx (CCmode);
26639 emit_store_conditional (store_mode, cond, mem, x);
26641 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26642 emit_unlikely_jump (x, label);
26644 rs6000_post_atomic_barrier (model);
26646 if (shift)
26648 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
26649 then do the calculations in a SImode register. */
26650 if (orig_before)
26651 rs6000_finish_atomic_subword (orig_before, before, shift);
26652 if (orig_after)
26653 rs6000_finish_atomic_subword (orig_after, after, shift);
26655 else if (store_mode != mode)
26657 /* QImode/HImode on machines with lbarx/lharx where we do the native
26658 operation and then do the calculations in a SImode register. */
26659 if (orig_before)
26660 convert_move (orig_before, before, 1);
26661 if (orig_after)
26662 convert_move (orig_after, after, 1);
26664 else if (orig_after && after != orig_after)
26665 emit_move_insn (orig_after, after);
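/* A minimal illustrative sketch, not from the original sources: for a
   full word, the __atomic_fetch_add below expands through this
   function into the classic reservation loop
       1:  lwarx  t,0,ptr
	   add    t,t,val
	   stwcx. t,0,ptr
	   bne-   1b
   bracketed by the barriers the memory model requires, with BEFORE
   and AFTER providing the fetch-and-op vs. op-and-fetch results.  */
static int fetch_add (int *p, int v)
{ return __atomic_fetch_add (p, v, __ATOMIC_ACQ_REL); }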
26668 /* Emit instructions to move SRC to DST. Called by splitters for
26669 multi-register moves. It will emit at most one instruction for
26670 each register that is accessed; that is, it won't emit li/lis pairs
26671 (or equivalent for 64-bit code). One of SRC or DST must be a hard
26672 register. */
26674 void
26675 rs6000_split_multireg_move (rtx dst, rtx src)
26677 /* The register number of the first register being moved. */
26678 int reg;
26679 /* The mode that is to be moved. */
26680 machine_mode mode;
26681 /* The mode that the move is being done in, and its size. */
26682 machine_mode reg_mode;
26683 int reg_mode_size;
26684 /* The number of registers that will be moved. */
26685 int nregs;
26687 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
26688 mode = GET_MODE (dst);
26689 nregs = hard_regno_nregs[reg][mode];
26690 if (FP_REGNO_P (reg))
26691 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
26692 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
26693 else if (ALTIVEC_REGNO_P (reg))
26694 reg_mode = V16QImode;
26695 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
26696 reg_mode = DFmode;
26697 else
26698 reg_mode = word_mode;
26699 reg_mode_size = GET_MODE_SIZE (reg_mode);
26701 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
26703 /* TDmode residing in FP registers is special, since the ISA requires that
26704 the lower-numbered word of a register pair is always the most significant
26705 word, even in little-endian mode. This does not match the usual subreg
26706 semantics, so we cannot use simplify_gen_subreg in those cases. Access
26707 the appropriate constituent registers "by hand" in little-endian mode.
26709 Note we do not need to check for destructive overlap here since TDmode
26710 can only reside in even/odd register pairs. */
26711 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
26713 rtx p_src, p_dst;
26714 int i;
26716 for (i = 0; i < nregs; i++)
26718 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
26719 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
26720 else
26721 p_src = simplify_gen_subreg (reg_mode, src, mode,
26722 i * reg_mode_size);
26724 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
26725 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
26726 else
26727 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
26728 i * reg_mode_size);
26730 emit_insn (gen_rtx_SET (p_dst, p_src));
26733 return;
26736 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
26738 /* Move register range backwards, if we might have destructive
26739 overlap. */
26740 int i;
26741 for (i = nregs - 1; i >= 0; i--)
26742 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
26743 i * reg_mode_size),
26744 simplify_gen_subreg (reg_mode, src, mode,
26745 i * reg_mode_size)));
26747 else
26749 int i;
26750 int j = -1;
26751 bool used_update = false;
26752 rtx restore_basereg = NULL_RTX;
26754 if (MEM_P (src) && INT_REGNO_P (reg))
26756 rtx breg;
26758 if (GET_CODE (XEXP (src, 0)) == PRE_INC
26759 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
26761 rtx delta_rtx;
26762 breg = XEXP (XEXP (src, 0), 0);
26763 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
26764 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
26765 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
26766 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
26767 src = replace_equiv_address (src, breg);
26769 else if (! rs6000_offsettable_memref_p (src, reg_mode))
26771 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
26773 rtx basereg = XEXP (XEXP (src, 0), 0);
26774 if (TARGET_UPDATE)
26776 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
26777 emit_insn (gen_rtx_SET (ndst,
26778 gen_rtx_MEM (reg_mode,
26779 XEXP (src, 0))));
26780 used_update = true;
26782 else
26783 emit_insn (gen_rtx_SET (basereg,
26784 XEXP (XEXP (src, 0), 1)));
26785 src = replace_equiv_address (src, basereg);
26787 else
26789 rtx basereg = gen_rtx_REG (Pmode, reg);
26790 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
26791 src = replace_equiv_address (src, basereg);
26795 breg = XEXP (src, 0);
26796 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
26797 breg = XEXP (breg, 0);
26799 /* If the base register we are using to address memory is
26800 also a destination reg, then change that register last. */
26801 if (REG_P (breg)
26802 && REGNO (breg) >= REGNO (dst)
26803 && REGNO (breg) < REGNO (dst) + nregs)
26804 j = REGNO (breg) - REGNO (dst);
26806 else if (MEM_P (dst) && INT_REGNO_P (reg))
26808 rtx breg;
26810 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
26811 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
26813 rtx delta_rtx;
26814 breg = XEXP (XEXP (dst, 0), 0);
26815 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
26816 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
26817 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
26819 /* We have to update the breg before doing the store.
26820 Use store with update, if available. */
26822 if (TARGET_UPDATE)
26824 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26825 emit_insn (TARGET_32BIT
26826 ? (TARGET_POWERPC64
26827 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
26828 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
26829 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
26830 used_update = true;
26832 else
26833 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
26834 dst = replace_equiv_address (dst, breg);
26836 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
26837 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
26839 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
26841 rtx basereg = XEXP (XEXP (dst, 0), 0);
26842 if (TARGET_UPDATE)
26844 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26845 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
26846 XEXP (dst, 0)),
26847 nsrc));
26848 used_update = true;
26850 else
26851 emit_insn (gen_rtx_SET (basereg,
26852 XEXP (XEXP (dst, 0), 1)));
26853 dst = replace_equiv_address (dst, basereg);
26855 else
26857 rtx basereg = XEXP (XEXP (dst, 0), 0);
26858 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
26859 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
26860 && REG_P (basereg)
26861 && REG_P (offsetreg)
26862 && REGNO (basereg) != REGNO (offsetreg));
26863 if (REGNO (basereg) == 0)
26865 rtx tmp = offsetreg;
26866 offsetreg = basereg;
26867 basereg = tmp;
26869 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
26870 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
26871 dst = replace_equiv_address (dst, basereg);
26874 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
26875 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
26878 for (i = 0; i < nregs; i++)
26880 /* Calculate index to next subword. */
26881 ++j;
26882 if (j == nregs)
26883 j = 0;
26885 /* If compiler already emitted move of first word by
26886 store with update, no need to do anything. */
26887 if (j == 0 && used_update)
26888 continue;
26890 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
26891 j * reg_mode_size),
26892 simplify_gen_subreg (reg_mode, src, mode,
26893 j * reg_mode_size)));
26895 if (restore_basereg != NULL_RTX)
26896 emit_insn (restore_basereg);
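/* A minimal illustrative sketch, not from the original sources: the
   direction choice above is the standard overlapping-copy rule --
   when the destination registers start above the source registers,
   copy from the highest subword down so nothing is clobbered before
   it is read.  */
static void move_overlapping (long *dst, const long *src, int nregs)
{
  if (dst > src)
    for (int i = nregs - 1; i >= 0; i--)
      dst[i] = src[i];
  else
    for (int i = 0; i < nregs; i++)
      dst[i] = src[i];
}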
26901 /* This page contains routines that are used to determine what the
26902 function prologue and epilogue code will do and write them out. */
26904 static inline bool
26905 save_reg_p (int r)
26907 return !call_used_regs[r] && df_regs_ever_live_p (r);
26910 /* Determine whether the gp REG is really used. */
26912 static bool
26913 rs6000_reg_live_or_pic_offset_p (int reg)
26915 /* We need to mark the PIC offset register live for the same conditions
26916 as it is set up, or otherwise it won't be saved before we clobber it. */
26918 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
26920 if (TARGET_TOC && TARGET_MINIMAL_TOC
26921 && (crtl->calls_eh_return
26922 || df_regs_ever_live_p (reg)
26923 || !constant_pool_empty_p ()))
26924 return true;
26926 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
26927 && flag_pic)
26928 return true;
26931 /* If the function calls eh_return, claim as used all the registers
26932 that would otherwise be checked for liveness. */
26934 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
26935 && !call_used_regs[reg]);
26938 /* Return the first fixed-point register that is required to be
26939 saved. 32 if none. */
26941 int
26942 first_reg_to_save (void)
26944 int first_reg;
26946 /* Find lowest numbered live register. */
26947 for (first_reg = 13; first_reg <= 31; first_reg++)
26948 if (save_reg_p (first_reg))
26949 break;
26951 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
26952 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
26953 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
26954 || (TARGET_TOC && TARGET_MINIMAL_TOC))
26955 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
26956 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
26958 #if TARGET_MACHO
26959 if (flag_pic
26960 && crtl->uses_pic_offset_table
26961 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
26962 return RS6000_PIC_OFFSET_TABLE_REGNUM;
26963 #endif
26965 return first_reg;
26968 /* Similar, for FP regs. */
26970 int
26971 first_fp_reg_to_save (void)
26973 int first_reg;
26975 /* Find lowest numbered live register. */
26976 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
26977 if (save_reg_p (first_reg))
26978 break;
26980 return first_reg;
26983 /* Similar, for AltiVec regs. */
26985 static int
26986 first_altivec_reg_to_save (void)
26988 int i;
26990 /* Stack frame remains as is unless we are in AltiVec ABI. */
26991 if (! TARGET_ALTIVEC_ABI)
26992 return LAST_ALTIVEC_REGNO + 1;
26994 /* On Darwin, the unwind routines are compiled without
26995 TARGET_ALTIVEC, and use save_world to save/restore the
26996 altivec registers when necessary. */
26997 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
26998 && ! TARGET_ALTIVEC)
26999 return FIRST_ALTIVEC_REGNO + 20;
27001 /* Find lowest numbered live register. */
27002 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
27003 if (save_reg_p (i))
27004 break;
27006 return i;
27009 /* Return a 32-bit mask of the AltiVec registers we need to set in
27010 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
27011 the 32-bit word is 0. */
27013 static unsigned int
27014 compute_vrsave_mask (void)
27016 unsigned int i, mask = 0;
27018 /* On Darwin, the unwind routines are compiled without
27019 TARGET_ALTIVEC, and use save_world to save/restore the
27020 call-saved altivec registers when necessary. */
27021 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
27022 && ! TARGET_ALTIVEC)
27023 mask |= 0xFFF;
27025 /* First, find out if we use _any_ altivec registers. */
27026 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
27027 if (df_regs_ever_live_p (i))
27028 mask |= ALTIVEC_REG_BIT (i);
27030 if (mask == 0)
27031 return mask;
27033 /* Next, remove the argument registers from the set. These must
27034 be in the VRSAVE mask set by the caller, so we don't need to add
27035 them in again. More importantly, the mask we compute here is
27036 used to generate CLOBBERs in the set_vrsave insn, and we do not
27037 wish the argument registers to die. */
27038 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
27039 mask &= ~ALTIVEC_REG_BIT (i);
27041 /* Similarly, remove the return value from the set. */
27043 bool yes = false;
27044 diddle_return_value (is_altivec_return_reg, &yes);
27045 if (yes)
27046 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
27049 return mask;
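/* A minimal illustrative sketch, not from the original sources: with
   V0 at the most significant bit (bit 0 in the comment's numbering),
   the VRSAVE bit for Vn is 1 << (31 - n); the 0xFFF above is then
   exactly V20..V31, the call-saved registers save_world handles.  */
static unsigned vrsave_bit (int vn) { return 1u << (31 - vn); }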
27052 /* For a very restricted set of circumstances, we can cut down the
27053 size of prologues/epilogues by calling our own save/restore-the-world
27054 routines. */
27056 static void
27057 compute_save_world_info (rs6000_stack_t *info)
27059 info->world_save_p = 1;
27060 info->world_save_p
27061 = (WORLD_SAVE_P (info)
27062 && DEFAULT_ABI == ABI_DARWIN
27063 && !cfun->has_nonlocal_label
27064 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
27065 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
27066 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
27067 && info->cr_save_p);
27069 /* This will not work in conjunction with sibcalls. Make sure there
27070 are none. (This check is expensive, but seldom executed.) */
27071 if (WORLD_SAVE_P (info))
27073 rtx_insn *insn;
27074 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
27075 if (CALL_P (insn) && SIBLING_CALL_P (insn))
27077 info->world_save_p = 0;
27078 break;
27082 if (WORLD_SAVE_P (info))
27084 /* Even if we're not touching VRsave, make sure there's room on the
27085 stack for it, if it looks like we're calling SAVE_WORLD, which
27086 will attempt to save it. */
27087 info->vrsave_size = 4;
27089 /* If we are going to save the world, we need to save the link register too. */
27090 info->lr_save_p = 1;
27092 /* "Save" the VRsave register too if we're saving the world. */
27093 if (info->vrsave_mask == 0)
27094 info->vrsave_mask = compute_vrsave_mask ();
27096 /* Because the Darwin register save/restore routines only handle
27097 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
27098 check. */
27099 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
27100 && (info->first_altivec_reg_save
27101 >= FIRST_SAVED_ALTIVEC_REGNO));
27104 return;
27108 static void
27109 is_altivec_return_reg (rtx reg, void *xyes)
27111 bool *yes = (bool *) xyes;
27112 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
27113 *yes = true;
27117 /* Return whether REG is a global user reg or has been specified by
27118 -ffixed-REG. We should not restore these, and so cannot use
27119 lmw or out-of-line restore functions if there are any. We also
27120 can't save them (well, emit frame notes for them), because frame
27121 unwinding during exception handling will restore saved registers. */
27123 static bool
27124 fixed_reg_p (int reg)
27126 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
27127 backend sets it, overriding anything the user might have given. */
27128 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
27129 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
27130 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
27131 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
27132 return false;
27134 return fixed_regs[reg];
27137 /* Determine the strategy for savings/restoring registers. */
27139 enum {
27140 SAVE_MULTIPLE = 0x1,
27141 SAVE_INLINE_GPRS = 0x2,
27142 SAVE_INLINE_FPRS = 0x4,
27143 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
27144 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
27145 SAVE_INLINE_VRS = 0x20,
27146 REST_MULTIPLE = 0x100,
27147 REST_INLINE_GPRS = 0x200,
27148 REST_INLINE_FPRS = 0x400,
27149 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
27150 REST_INLINE_VRS = 0x1000
27153 static int
27154 rs6000_savres_strategy (rs6000_stack_t *info,
27155 bool using_static_chain_p)
27157 int strategy = 0;
27159 /* Select between in-line and out-of-line save and restore of regs.
27160 First, all the obvious cases where we don't use out-of-line. */
27161 if (crtl->calls_eh_return
27162 || cfun->machine->ra_need_lr)
27163 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
27164 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
27165 | SAVE_INLINE_VRS | REST_INLINE_VRS);
27167 if (info->first_gp_reg_save == 32)
27168 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27170 if (info->first_fp_reg_save == 64
27171 /* The out-of-line FP routines use double-precision stores;
27172 we can't use those routines if we don't have such stores. */
27173 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
27174 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27176 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
27177 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27179 /* Define cutoff for using out-of-line functions to save registers. */
27180 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
27182 if (!optimize_size)
27184 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27185 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27186 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27188 else
27190 /* Prefer out-of-line restore if it will exit. */
27191 if (info->first_fp_reg_save > 61)
27192 strategy |= SAVE_INLINE_FPRS;
27193 if (info->first_gp_reg_save > 29)
27195 if (info->first_fp_reg_save == 64)
27196 strategy |= SAVE_INLINE_GPRS;
27197 else
27198 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27200 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
27201 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27204 else if (DEFAULT_ABI == ABI_DARWIN)
27206 if (info->first_fp_reg_save > 60)
27207 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27208 if (info->first_gp_reg_save > 29)
27209 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27210 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27212 else
27214 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27215 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
27216 || info->first_fp_reg_save > 61)
27217 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27218 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27219 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27222 /* Don't bother to try to save things out-of-line if r11 is occupied
27223 by the static chain. It would require too much fiddling and the
27224 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
27225 pointer on Darwin, and AIX uses r1 or r12. */
27226 if (using_static_chain_p
27227 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
27228 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
27229 | SAVE_INLINE_GPRS
27230 | SAVE_INLINE_VRS);
27232 /* Saving CR interferes with the exit routines used on the SPE, so
27233 just punt here. */
27234 if (TARGET_SPE_ABI
27235 && info->spe_64bit_regs_used
27236 && info->cr_save_p)
27237 strategy |= REST_INLINE_GPRS;
27239 /* We can only use the out-of-line routines to restore fprs if we've
27240 saved all the registers from first_fp_reg_save in the prologue.
27241 Otherwise, we risk loading garbage. Of course, if we have saved
27242 out-of-line then we know we haven't skipped any fprs. */
27243 if ((strategy & SAVE_INLINE_FPRS)
27244 && !(strategy & REST_INLINE_FPRS))
27246 int i;
27248 for (i = info->first_fp_reg_save; i < 64; i++)
27249 if (fixed_regs[i] || !save_reg_p (i))
27251 strategy |= REST_INLINE_FPRS;
27252 break;
27256 /* Similarly, for altivec regs. */
27257 if ((strategy & SAVE_INLINE_VRS)
27258 && !(strategy & REST_INLINE_VRS))
27260 int i;
27262 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
27263 if (fixed_regs[i] || !save_reg_p (i))
27265 strategy |= REST_INLINE_VRS;
27266 break;
27270 /* info->lr_save_p isn't yet set if the only reason lr needs to be
27271 saved is an out-of-line save or restore. Set up the value for
27272 the next test (excluding out-of-line gprs). */
27273 bool lr_save_p = (info->lr_save_p
27274 || !(strategy & SAVE_INLINE_FPRS)
27275 || !(strategy & SAVE_INLINE_VRS)
27276 || !(strategy & REST_INLINE_FPRS)
27277 || !(strategy & REST_INLINE_VRS));
27279 if (TARGET_MULTIPLE
27280 && !TARGET_POWERPC64
27281 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
27282 && info->first_gp_reg_save < 31
27283 && !(flag_shrink_wrap
27284 && flag_shrink_wrap_separate
27285 && optimize_function_for_speed_p (cfun)))
27287 /* Prefer store multiple for saves over out-of-line routines,
27288 since the store-multiple instruction will always be smaller. */
27289 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
27291 /* The situation is more complicated with load multiple. We'd
27292 prefer to use the out-of-line routines for restores, since the
27293 "exit" out-of-line routines can handle the restore of LR and the
27294 frame teardown. However, it doesn't make sense to use the
27295 out-of-line routine if that is the only reason we'd need to save
27296 LR, and we can't use the "exit" out-of-line gpr restore if we
27297 have saved some fprs; in those cases it is advantageous to use
27298 load multiple when available. */
27299 if (info->first_fp_reg_save != 64 || !lr_save_p)
27300 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
27303 /* Using the "exit" out-of-line routine does not improve code size
27304 if using it would require lr to be saved and if only saving one
27305 or two gprs. */
27306 else if (!lr_save_p && info->first_gp_reg_save > 29)
27307 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27309 /* We can only use load multiple or the out-of-line routines to
27310 restore gprs if we've saved all the registers from
27311 first_gp_reg_save. Otherwise, we risk loading garbage.
27312 Of course, if we have saved out-of-line or used stmw then we know
27313 we haven't skipped any gprs. */
27314 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
27315 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
27317 int i;
27319 for (i = info->first_gp_reg_save; i < 32; i++)
27320 if (fixed_reg_p (i) || !save_reg_p (i))
27322 strategy |= REST_INLINE_GPRS;
27323 strategy &= ~REST_MULTIPLE;
27324 break;
27328 if (TARGET_ELF && TARGET_64BIT)
27330 if (!(strategy & SAVE_INLINE_FPRS))
27331 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
27332 else if (!(strategy & SAVE_INLINE_GPRS)
27333 && info->first_fp_reg_save == 64)
27334 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
27336 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
27337 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
27339 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
27340 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
27342 return strategy;
27345 /* Calculate the stack information for the current function. This is
27346 complicated by having two separate calling sequences, the AIX calling
27347 sequence and the V.4 calling sequence.
27349 AIX (and Darwin/Mac OS X) stack frames look like:
27350 32-bit 64-bit
27351 SP----> +---------------------------------------+
27352 | back chain to caller | 0 0
27353 +---------------------------------------+
27354 | saved CR | 4 8 (8-11)
27355 +---------------------------------------+
27356 | saved LR | 8 16
27357 +---------------------------------------+
27358 | reserved for compilers | 12 24
27359 +---------------------------------------+
27360 | reserved for binders | 16 32
27361 +---------------------------------------+
27362 | saved TOC pointer | 20 40
27363 +---------------------------------------+
27364 | Parameter save area (+padding*) (P) | 24 48
27365 +---------------------------------------+
27366 | Alloca space (A) | 24+P etc.
27367 +---------------------------------------+
27368 | Local variable space (L) | 24+P+A
27369 +---------------------------------------+
27370 | Float/int conversion temporary (X) | 24+P+A+L
27371 +---------------------------------------+
27372 | Save area for AltiVec registers (W) | 24+P+A+L+X
27373 +---------------------------------------+
27374 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
27375 +---------------------------------------+
27376 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
27377 +---------------------------------------+
27378 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
27379 +---------------------------------------+
27380 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
27381 +---------------------------------------+
27382 old SP->| back chain to caller's caller |
27383 +---------------------------------------+
27385 * If the alloca area is present, the parameter save area is
27386 padded so that the former starts 16-byte aligned.
27388 The required alignment for AIX configurations is two words (i.e., 8
27389 or 16 bytes).
27391 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
27393 SP----> +---------------------------------------+
27394 | Back chain to caller | 0
27395 +---------------------------------------+
27396 | Save area for CR | 8
27397 +---------------------------------------+
27398 | Saved LR | 16
27399 +---------------------------------------+
27400 | Saved TOC pointer | 24
27401 +---------------------------------------+
27402 | Parameter save area (+padding*) (P) | 32
27403 +---------------------------------------+
27404 | Alloca space (A) | 32+P
27405 +---------------------------------------+
27406 | Local variable space (L) | 32+P+A
27407 +---------------------------------------+
27408 | Save area for AltiVec registers (W) | 32+P+A+L
27409 +---------------------------------------+
27410 | AltiVec alignment padding (Y) | 32+P+A+L+W
27411 +---------------------------------------+
27412 | Save area for GP registers (G) | 32+P+A+L+W+Y
27413 +---------------------------------------+
27414 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
27415 +---------------------------------------+
27416 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
27417 +---------------------------------------+
27419 * If the alloca area is present, the parameter save area is
27420 padded so that the former starts 16-byte aligned.
27422 V.4 stack frames look like:
27424 SP----> +---------------------------------------+
27425 | back chain to caller | 0
27426 +---------------------------------------+
27427 | caller's saved LR | 4
27428 +---------------------------------------+
27429 | Parameter save area (+padding*) (P) | 8
27430 +---------------------------------------+
27431 | Alloca space (A) | 8+P
27432 +---------------------------------------+
27433 | Varargs save area (V) | 8+P+A
27434 +---------------------------------------+
27435 | Local variable space (L) | 8+P+A+V
27436 +---------------------------------------+
27437 | Float/int conversion temporary (X) | 8+P+A+V+L
27438 +---------------------------------------+
27439 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
27440 +---------------------------------------+
27441 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
27442 +---------------------------------------+
27443 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
27444 +---------------------------------------+
27445 | SPE: area for 64-bit GP registers |
27446 +---------------------------------------+
27447 | SPE alignment padding |
27448 +---------------------------------------+
27449 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
27450 +---------------------------------------+
27451 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
27452 +---------------------------------------+
27453 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
27454 +---------------------------------------+
27455 old SP->| back chain to caller's caller |
27456 +---------------------------------------+
27458 * If the alloca area is present and the required alignment is
27459 16 bytes, the parameter save area is padded so that the
27460 alloca area starts 16-byte aligned.
27462 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
27463 given. (But note below and in sysv4.h that we require only 8 and
27464 may round up the size of our stack frame anyway. The historical
27465 reason is early versions of powerpc-linux which didn't properly
27466 align the stack at program startup. A happy side-effect is that
27467 -mno-eabi libraries can be used with -meabi programs.)
27469 The EABI configuration defaults to the V.4 layout. However,
27470 the stack alignment requirements may differ. If -mno-eabi is not
27471 given, the required stack alignment is 8 bytes; if -mno-eabi is
27472 given, the required alignment is 16 bytes. (But see V.4 comment
27473 above.) */
27475 #ifndef ABI_STACK_BOUNDARY
27476 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
27477 #endif
27479 static rs6000_stack_t *
27480 rs6000_stack_info (void)
27482 /* We should never be called for thunks, we are not set up for that. */
27483 gcc_assert (!cfun->is_thunk);
27485 rs6000_stack_t *info = &stack_info;
27486 int reg_size = TARGET_32BIT ? 4 : 8;
27487 int ehrd_size;
27488 int ehcr_size;
27489 int save_align;
27490 int first_gp;
27491 HOST_WIDE_INT non_fixed_size;
27492 bool using_static_chain_p;
27494 if (reload_completed && info->reload_completed)
27495 return info;
27497 memset (info, 0, sizeof (*info));
27498 info->reload_completed = reload_completed;
27500 if (TARGET_SPE)
27502 /* Cache value so we don't rescan instruction chain over and over. */
27503 if (cfun->machine->spe_insn_chain_scanned_p == 0)
27504 cfun->machine->spe_insn_chain_scanned_p
27505 = spe_func_has_64bit_regs_p () + 1;
27506 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
27509 /* Select which calling sequence. */
27510 info->abi = DEFAULT_ABI;
27512 /* Calculate which registers need to be saved & save area size. */
27513 info->first_gp_reg_save = first_reg_to_save ();
27514 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
27515 even if it currently looks like we won't. Reload may need it to
27516 get at a constant; if so, it will have already created a constant
27517 pool entry for it. */
27518 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
27519 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
27520 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
27521 && crtl->uses_const_pool
27522 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
27523 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
27524 else
27525 first_gp = info->first_gp_reg_save;
27527 info->gp_size = reg_size * (32 - first_gp);
27529 /* For the SPE, we have an additional upper 32-bits on each GPR.
27530 Ideally we should save the entire 64-bits only when the upper
27531 half is used in SIMD instructions. Since we only record
27532 registers live (not the size they are used in), this proves
27533 difficult because we'd have to traverse the instruction chain at
27534 the right time, taking reload into account. This is a real pain,
27535 so we opt to save all the GPRs in 64-bits if even one register
27536 gets used in 64-bits. Otherwise, all the registers in the frame
27537 get saved in 32-bits.
27539 So, when we save all GPRs (except the SP) in 64-bits, the
27540 traditional GP save area will be empty. */
27541 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27542 info->gp_size = 0;
27544 info->first_fp_reg_save = first_fp_reg_to_save ();
27545 info->fp_size = 8 * (64 - info->first_fp_reg_save);
27547 info->first_altivec_reg_save = first_altivec_reg_to_save ();
27548 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
27549 - info->first_altivec_reg_save);
27551 /* Does this function call anything? */
27552 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
27554 /* Determine if we need to save the condition code registers. */
27555 if (save_reg_p (CR2_REGNO)
27556 || save_reg_p (CR3_REGNO)
27557 || save_reg_p (CR4_REGNO))
27559 info->cr_save_p = 1;
27560 if (DEFAULT_ABI == ABI_V4)
27561 info->cr_size = reg_size;
27564 /* If the current function calls __builtin_eh_return, then we need
27565 to allocate stack space for registers that will hold data for
27566 the exception handler. */
27567 if (crtl->calls_eh_return)
27569 unsigned int i;
27570 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
27571 continue;
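/* On this port EH_RETURN_DATA_REGNO yields r3..r6, so I counts the
   four EH data registers here.  */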
27573 /* SPE saves EH registers in 64-bits. */
27574 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
27575 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
27577 else
27578 ehrd_size = 0;
27580 /* In the ELFv2 ABI, we also need to allocate space for separate
27581 CR field save areas if the function calls __builtin_eh_return. */
27582 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27584 /* This hard-codes that we have three call-saved CR fields. */
27585 ehcr_size = 3 * reg_size;
27586 /* We do *not* use the regular CR save mechanism. */
27587 info->cr_save_p = 0;
27589 else
27590 ehcr_size = 0;
27592 /* Determine various sizes. */
27593 info->reg_size = reg_size;
27594 info->fixed_size = RS6000_SAVE_AREA;
27595 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
27596 if (cfun->calls_alloca)
27597 info->parm_size =
27598 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
27599 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
27600 else
27601 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
27602 TARGET_ALTIVEC ? 16 : 8);
27603 if (FRAME_GROWS_DOWNWARD)
27604 info->vars_size
27605 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
27606 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
27607 - (info->fixed_size + info->vars_size + info->parm_size);
27609 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27610 info->spe_gp_size = 8 * (32 - first_gp);
27612 if (TARGET_ALTIVEC_ABI)
27613 info->vrsave_mask = compute_vrsave_mask ();
27615 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
27616 info->vrsave_size = 4;
27618 compute_save_world_info (info);
27620 /* Calculate the offsets. */
27621 switch (DEFAULT_ABI)
27623 case ABI_NONE:
27624 default:
27625 gcc_unreachable ();
27627 case ABI_AIX:
27628 case ABI_ELFv2:
27629 case ABI_DARWIN:
27630 info->fp_save_offset = -info->fp_size;
27631 info->gp_save_offset = info->fp_save_offset - info->gp_size;
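/* Both offsets are negative, relative to the top of the save block.
   E.g. saving f30-f31 and r29-r31 with 8-byte registers gives
   fp_save_offset = -16 and gp_save_offset = -40.  */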
27633 if (TARGET_ALTIVEC_ABI)
27635 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
27637 /* Align stack so vector save area is on a quadword boundary.
27638 The padding goes above the vectors. */
27639 if (info->altivec_size != 0)
27640 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
27642 info->altivec_save_offset = info->vrsave_save_offset
27643 - info->altivec_padding_size
27644 - info->altivec_size;
27645 gcc_assert (info->altivec_size == 0
27646 || info->altivec_save_offset % 16 == 0);
27648 /* Adjust for AltiVec case. */
27649 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
27651 else
27652 info->ehrd_offset = info->gp_save_offset - ehrd_size;
27654 info->ehcr_offset = info->ehrd_offset - ehcr_size;
27655 info->cr_save_offset = reg_size; /* first word when 64-bit. */
27656 info->lr_save_offset = 2*reg_size;
27657 break;
27659 case ABI_V4:
27660 info->fp_save_offset = -info->fp_size;
27661 info->gp_save_offset = info->fp_save_offset - info->gp_size;
27662 info->cr_save_offset = info->gp_save_offset - info->cr_size;
27664 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27666 /* Align stack so SPE GPR save area is aligned on a
27667 double-word boundary. */
27668 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
27669 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
27670 else
27671 info->spe_padding_size = 0;
27673 info->spe_gp_save_offset = info->cr_save_offset
27674 - info->spe_padding_size
27675 - info->spe_gp_size;
27677 /* Adjust for SPE case. */
27678 info->ehrd_offset = info->spe_gp_save_offset;
27680 else if (TARGET_ALTIVEC_ABI)
27682 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
27684 /* Align stack so vector save area is on a quadword boundary. */
27685 if (info->altivec_size != 0)
27686 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
27688 info->altivec_save_offset = info->vrsave_save_offset
27689 - info->altivec_padding_size
27690 - info->altivec_size;
27692 /* Adjust for AltiVec case. */
27693 info->ehrd_offset = info->altivec_save_offset;
27695 else
27696 info->ehrd_offset = info->cr_save_offset;
27698 info->ehrd_offset -= ehrd_size;
27699 info->lr_save_offset = reg_size;
27702 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
27703 info->save_size = RS6000_ALIGN (info->fp_size
27704 + info->gp_size
27705 + info->altivec_size
27706 + info->altivec_padding_size
27707 + info->spe_gp_size
27708 + info->spe_padding_size
27709 + ehrd_size
27710 + ehcr_size
27711 + info->cr_size
27712 + info->vrsave_size,
27713 save_align);
27715 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
27717 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
27718 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
27720 /* Determine if we need to save the link register. */
27721 if (info->calls_p
27722 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27723 && crtl->profile
27724 && !TARGET_PROFILE_KERNEL)
27725 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
27726 #ifdef TARGET_RELOCATABLE
27727 || (DEFAULT_ABI == ABI_V4
27728 && (TARGET_RELOCATABLE || flag_pic > 1)
27729 && !constant_pool_empty_p ())
27730 #endif
27731 || rs6000_ra_ever_killed ())
27732 info->lr_save_p = 1;
27734 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27735 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27736 && call_used_regs[STATIC_CHAIN_REGNUM]);
27737 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
27739 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
27740 || !(info->savres_strategy & SAVE_INLINE_FPRS)
27741 || !(info->savres_strategy & SAVE_INLINE_VRS)
27742 || !(info->savres_strategy & REST_INLINE_GPRS)
27743 || !(info->savres_strategy & REST_INLINE_FPRS)
27744 || !(info->savres_strategy & REST_INLINE_VRS))
27745 info->lr_save_p = 1;
27747 if (info->lr_save_p)
27748 df_set_regs_ever_live (LR_REGNO, true);
27750 /* Determine if we need to allocate any stack frame:
27752 For AIX we need to push the stack if a frame pointer is needed
27753 (because the stack might be dynamically adjusted), if we are
27754 debugging, if we make calls, or if the sum of fp_save, gp_save,
27755 and local variables is more than the space needed to save all
27756 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
27757 + 18*8 = 288 (GPR13 reserved).
27759 For V.4 we don't have the stack cushion that AIX uses, but assume
27760 that the debugger can handle stackless frames. */
27762 if (info->calls_p)
27763 info->push_p = 1;
27765 else if (DEFAULT_ABI == ABI_V4)
27766 info->push_p = non_fixed_size != 0;
27768 else if (frame_pointer_needed)
27769 info->push_p = 1;
27771 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
27772 info->push_p = 1;
27774 else
27775 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
27777 return info;
27780 /* Return true if the current function uses any GPRs in 64-bit SIMD
27781 mode. */
27783 static bool
27784 spe_func_has_64bit_regs_p (void)
27786 rtx_insn *insns, *insn;
27788 /* Functions that save and restore all the call-saved registers will
27789 need to save/restore the registers in 64-bits. */
27790 if (crtl->calls_eh_return
27791 || cfun->calls_setjmp
27792 || crtl->has_nonlocal_goto)
27793 return true;
27795 insns = get_insns ();
27797 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
27799 if (INSN_P (insn))
27801 rtx i;
27803 /* FIXME: This should be implemented with attributes...
27805 (set_attr "spe64" "true")....then,
27806 if (get_spe64(insn)) return true;
27808 It's the only reliable way to do the stuff below. */
27810 i = PATTERN (insn);
27811 if (GET_CODE (i) == SET)
27813 machine_mode mode = GET_MODE (SET_SRC (i));
27815 if (SPE_VECTOR_MODE (mode))
27816 return true;
27817 if (TARGET_E500_DOUBLE
27818 && (mode == DFmode || FLOAT128_2REG_P (mode)))
27819 return true;
27824 return false;
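/* Dump the stack information computed by rs6000_stack_info to stderr;
   if INFO is null, compute it for the current function first.  */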
27827 static void
27828 debug_stack_info (rs6000_stack_t *info)
27830 const char *abi_string;
27832 if (! info)
27833 info = rs6000_stack_info ();
27835 fprintf (stderr, "\nStack information for function %s:\n",
27836 ((current_function_decl && DECL_NAME (current_function_decl))
27837 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
27838 : "<unknown>"));
27840 switch (info->abi)
27842 default: abi_string = "Unknown"; break;
27843 case ABI_NONE: abi_string = "NONE"; break;
27844 case ABI_AIX: abi_string = "AIX"; break;
27845 case ABI_ELFv2: abi_string = "ELFv2"; break;
27846 case ABI_DARWIN: abi_string = "Darwin"; break;
27847 case ABI_V4: abi_string = "V.4"; break;
27850 fprintf (stderr, "\tABI = %5s\n", abi_string);
27852 if (TARGET_ALTIVEC_ABI)
27853 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
27855 if (TARGET_SPE_ABI)
27856 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
27858 if (info->first_gp_reg_save != 32)
27859 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
27861 if (info->first_fp_reg_save != 64)
27862 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
27864 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
27865 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
27866 info->first_altivec_reg_save);
27868 if (info->lr_save_p)
27869 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
27871 if (info->cr_save_p)
27872 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
27874 if (info->vrsave_mask)
27875 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
27877 if (info->push_p)
27878 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
27880 if (info->calls_p)
27881 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
27883 if (info->gp_size)
27884 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
27886 if (info->fp_size)
27887 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
27889 if (info->altivec_size)
27890 fprintf (stderr, "\taltivec_save_offset = %5d\n",
27891 info->altivec_save_offset);
27893 if (info->spe_gp_size)
27894 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
27895 info->spe_gp_save_offset);
27897 if (info->vrsave_size)
27898 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
27899 info->vrsave_save_offset);
27901 if (info->lr_save_p)
27902 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
27904 if (info->cr_save_p)
27905 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
27907 if (info->varargs_save_offset)
27908 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
27910 if (info->total_size)
27911 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27912 info->total_size);
27914 if (info->vars_size)
27915 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27916 info->vars_size);
27918 if (info->parm_size)
27919 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
27921 if (info->fixed_size)
27922 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
27924 if (info->gp_size)
27925 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
27927 if (info->spe_gp_size)
27928 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
27930 if (info->fp_size)
27931 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
27933 if (info->altivec_size)
27934 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
27936 if (info->vrsave_size)
27937 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
27939 if (info->altivec_padding_size)
27940 fprintf (stderr, "\taltivec_padding_size= %5d\n",
27941 info->altivec_padding_size);
27943 if (info->spe_padding_size)
27944 fprintf (stderr, "\tspe_padding_size = %5d\n",
27945 info->spe_padding_size);
27947 if (info->cr_size)
27948 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
27950 if (info->save_size)
27951 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
27953 if (info->reg_size != 4)
27954 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
27956 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
27958 fprintf (stderr, "\n");
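/* Used to implement RETURN_ADDR_RTX: return the address that the
   function will return to, COUNT frames up, with FRAME giving the
   frame pointer of that frame.  */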
27961 rtx
27962 rs6000_return_addr (int count, rtx frame)
27964 /* Currently we don't optimize very well between prolog and body
27965 code, and for PIC code the result can actually be quite bad, so
27966 don't try to be too clever here. */
27967 if (count != 0
27968 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
27970 cfun->machine->ra_needs_full_frame = 1;
27972 return
27973 gen_rtx_MEM
27974 (Pmode,
27975 memory_address
27976 (Pmode,
27977 plus_constant (Pmode,
27978 copy_to_reg
27979 (gen_rtx_MEM (Pmode,
27980 memory_address (Pmode, frame))),
27981 RETURN_ADDRESS_OFFSET)));
27984 cfun->machine->ra_need_lr = 1;
27985 return get_hard_reg_initial_val (Pmode, LR_REGNO);
27988 /* Say whether a function is a candidate for sibcall handling or not. */
27990 static bool
27991 rs6000_function_ok_for_sibcall (tree decl, tree exp)
27993 tree fntype;
27995 if (decl)
27996 fntype = TREE_TYPE (decl);
27997 else
27998 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
28000 /* We can't do it if the called function has more vector parameters
28001 than the current function; there's nowhere to put the VRsave code. */
28002 if (TARGET_ALTIVEC_ABI
28003 && TARGET_ALTIVEC_VRSAVE
28004 && !(decl && decl == current_function_decl))
28006 function_args_iterator args_iter;
28007 tree type;
28008 int nvreg = 0;
28010 /* Functions with vector parameters are required to have a
28011 prototype, so the argument type info must be available
28012 here. */
28013 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
28014 if (TREE_CODE (type) == VECTOR_TYPE
28015 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
28016 nvreg++;
28018 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
28019 if (TREE_CODE (type) == VECTOR_TYPE
28020 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
28021 nvreg--;
28023 if (nvreg > 0)
28024 return false;
28027 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
28028 functions, because the callee may have a different TOC pointer to
28029 the caller and there's no way to ensure we restore the TOC when
28030 we return. With the secure-plt SYSV ABI we can't make non-local
28031 calls when -fpic/PIC because the plt call stubs use r30. */
28032 if (DEFAULT_ABI == ABI_DARWIN
28033 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28034 && decl
28035 && !DECL_EXTERNAL (decl)
28036 && !DECL_WEAK (decl)
28037 && (*targetm.binds_local_p) (decl))
28038 || (DEFAULT_ABI == ABI_V4
28039 && (!TARGET_SECURE_PLT
28040 || !flag_pic
28041 || (decl
28042 && (*targetm.binds_local_p) (decl)))))
28044 tree attr_list = TYPE_ATTRIBUTES (fntype);
28046 if (!lookup_attribute ("longcall", attr_list)
28047 || lookup_attribute ("shortcall", attr_list))
28048 return true;
28051 return false;
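/* Return 1 if the link register has (or may have) been clobbered by
   something other than prologue/epilogue code, 0 otherwise, using the
   frozen lr_save_state value when one has been recorded.  */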
28054 static int
28055 rs6000_ra_ever_killed (void)
28057 rtx_insn *top;
28058 rtx reg;
28059 rtx_insn *insn;
28061 if (cfun->is_thunk)
28062 return 0;
28064 if (cfun->machine->lr_save_state)
28065 return cfun->machine->lr_save_state - 1;
28067 /* regs_ever_live has LR marked as used if any sibcalls are present,
28068 but this should not force saving and restoring in the
28069 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
28070 clobbers LR, so that is inappropriate. */
28072 /* Also, the prologue can generate a store into LR that
28073 doesn't really count, like this:
28075 move LR->R0
28076 bcl to set PIC register
28077 move LR->R31
28078 move R0->LR
28080 When we're called from the epilogue, we need to avoid counting
28081 this as a store. */
28083 push_topmost_sequence ();
28084 top = get_insns ();
28085 pop_topmost_sequence ();
28086 reg = gen_rtx_REG (Pmode, LR_REGNO);
28088 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
28090 if (INSN_P (insn))
28092 if (CALL_P (insn))
28094 if (!SIBLING_CALL_P (insn))
28095 return 1;
28097 else if (find_regno_note (insn, REG_INC, LR_REGNO))
28098 return 1;
28099 else if (set_of (reg, insn) != NULL_RTX
28100 && !prologue_epilogue_contains (insn))
28101 return 1;
28104 return 0;
28107 /* Emit instructions needed to load the TOC register.
28108 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
28109 and there is a constant pool, or for SVR4 -fpic. */
28111 void
28112 rs6000_emit_load_toc_table (int fromprolog)
28114 rtx dest;
28115 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28117 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
28119 char buf[30];
28120 rtx lab, tmp1, tmp2, got;
28122 lab = gen_label_rtx ();
28123 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
28124 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28125 if (flag_pic == 2)
28127 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28128 need_toc_init = 1;
28130 else
28131 got = rs6000_got_sym ();
28132 tmp1 = tmp2 = dest;
28133 if (!fromprolog)
28135 tmp1 = gen_reg_rtx (Pmode);
28136 tmp2 = gen_reg_rtx (Pmode);
28138 emit_insn (gen_load_toc_v4_PIC_1 (lab));
28139 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
28140 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
28141 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
28143 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
28145 emit_insn (gen_load_toc_v4_pic_si ());
28146 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28148 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
28150 char buf[30];
28151 rtx temp0 = (fromprolog
28152 ? gen_rtx_REG (Pmode, 0)
28153 : gen_reg_rtx (Pmode));
28155 if (fromprolog)
28157 rtx symF, symL;
28159 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28160 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28162 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
28163 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28165 emit_insn (gen_load_toc_v4_PIC_1 (symF));
28166 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28167 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
28169 else
28171 rtx tocsym, lab;
28173 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28174 need_toc_init = 1;
28175 lab = gen_label_rtx ();
28176 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
28177 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28178 if (TARGET_LINK_STACK)
28179 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
28180 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
28182 emit_insn (gen_addsi3 (dest, temp0, dest));
28184 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
28186 /* This is for AIX code running in non-PIC ELF32. */
28187 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28189 need_toc_init = 1;
28190 emit_insn (gen_elf_high (dest, realsym));
28191 emit_insn (gen_elf_low (dest, dest, realsym));
28193 else
28195 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28197 if (TARGET_32BIT)
28198 emit_insn (gen_load_toc_aix_si (dest));
28199 else
28200 emit_insn (gen_load_toc_aix_di (dest));
28204 /* Emit instructions to restore the link register after determining where
28205 its value has been stored. */
28207 void
28208 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
28210 rs6000_stack_t *info = rs6000_stack_info ();
28211 rtx operands[2];
28213 operands[0] = source;
28214 operands[1] = scratch;
28216 if (info->lr_save_p)
28218 rtx frame_rtx = stack_pointer_rtx;
28219 HOST_WIDE_INT sp_offset = 0;
28220 rtx tmp;
28222 if (frame_pointer_needed
28223 || cfun->calls_alloca
28224 || info->total_size > 32767)
28226 tmp = gen_frame_mem (Pmode, frame_rtx);
28227 emit_move_insn (operands[1], tmp);
28228 frame_rtx = operands[1];
28230 else if (info->push_p)
28231 sp_offset = info->total_size;
28233 tmp = plus_constant (Pmode, frame_rtx,
28234 info->lr_save_offset + sp_offset);
28235 tmp = gen_frame_mem (Pmode, tmp);
28236 emit_move_insn (tmp, operands[0]);
28238 else
28239 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
28241 /* Freeze lr_save_p. We've just emitted rtl that depends on the
28242 state of lr_save_p so any change from here on would be a bug. In
28243 particular, stop rs6000_ra_ever_killed from considering the SET
28244 of lr we may have added just above. */
28245 cfun->machine->lr_save_state = info->lr_save_p + 1;
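/* Alias set used for TOC references; created lazily on first use by
   get_TOC_alias_set below.  */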
28248 static GTY(()) alias_set_type set = -1;
28250 alias_set_type
28251 get_TOC_alias_set (void)
28253 if (set == -1)
28254 set = new_alias_set ();
28255 return set;
28258 /* This returns nonzero if the current function uses the TOC. This is
28259 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
28260 is generated by the ABI_V4 load_toc_* patterns. */
28261 #if TARGET_ELF
28262 static int
28263 uses_TOC (void)
28265 rtx_insn *insn;
28267 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
28268 if (INSN_P (insn))
28270 rtx pat = PATTERN (insn);
28271 int i;
28273 if (GET_CODE (pat) == PARALLEL)
28274 for (i = 0; i < XVECLEN (pat, 0); i++)
28276 rtx sub = XVECEXP (pat, 0, i);
28277 if (GET_CODE (sub) == USE)
28279 sub = XEXP (sub, 0);
28280 if (GET_CODE (sub) == UNSPEC
28281 && XINT (sub, 1) == UNSPEC_TOC)
28282 return 1;
28286 return 0;
28288 #endif
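/* Build an rtx referencing SYMBOL via the TOC.  For the medium and
   large code models the HIGH part is emitted separately, into
   LARGETOC_REG when one is supplied.  */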
28290 rtx
28291 create_TOC_reference (rtx symbol, rtx largetoc_reg)
28293 rtx tocrel, tocreg, hi;
28295 if (TARGET_DEBUG_ADDR)
28297 if (GET_CODE (symbol) == SYMBOL_REF)
28298 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
28299 XSTR (symbol, 0));
28300 else
28302 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
28303 GET_RTX_NAME (GET_CODE (symbol)));
28304 debug_rtx (symbol);
28308 if (!can_create_pseudo_p ())
28309 df_set_regs_ever_live (TOC_REGISTER, true);
28311 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
28312 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
28313 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
28314 return tocrel;
28316 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
28317 if (largetoc_reg != NULL)
28319 emit_move_insn (largetoc_reg, hi);
28320 hi = largetoc_reg;
28322 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
28325 /* Issue assembly directives that create a reference to the given DWARF
28326 FRAME_TABLE_LABEL from the current function section. */
28327 void
28328 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
28330 fprintf (asm_out_file, "\t.ref %s\n",
28331 (* targetm.strip_name_encoding) (frame_table_label));
28334 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
28335 and the change to the stack pointer. */
28337 static void
28338 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
28340 rtvec p;
28341 int i;
28342 rtx regs[3];
28344 i = 0;
28345 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28346 if (hard_frame_needed)
28347 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
28348 if (!(REGNO (fp) == STACK_POINTER_REGNUM
28349 || (hard_frame_needed
28350 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
28351 regs[i++] = fp;
28353 p = rtvec_alloc (i);
28354 while (--i >= 0)
28356 rtx mem = gen_frame_mem (BLKmode, regs[i]);
28357 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
28360 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
28363 /* Emit the correct code for allocating stack space, as insns.
28364 If COPY_REG, leave a copy of the old stack pointer (plus COPY_OFF) in it.
28365 The generated code may use hard register 0 as a temporary. */
28367 static rtx_insn *
28368 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
28370 rtx_insn *insn;
28371 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28372 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
28373 rtx todec = gen_int_mode (-size, Pmode);
28374 rtx par, set, mem;
28376 if (INTVAL (todec) != -size)
28378 warning (0, "stack frame too large");
28379 emit_insn (gen_trap ());
28380 return 0;
28383 if (crtl->limit_stack)
28385 if (REG_P (stack_limit_rtx)
28386 && REGNO (stack_limit_rtx) > 1
28387 && REGNO (stack_limit_rtx) <= 31)
28389 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
28390 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
28391 const0_rtx));
28393 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
28394 && TARGET_32BIT
28395 && DEFAULT_ABI == ABI_V4
28396 && !flag_pic)
28398 rtx toload = gen_rtx_CONST (VOIDmode,
28399 gen_rtx_PLUS (Pmode,
28400 stack_limit_rtx,
28401 GEN_INT (size)));
28403 emit_insn (gen_elf_high (tmp_reg, toload));
28404 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
28405 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
28406 const0_rtx));
28408 else
28409 warning (0, "stack limit expression is not supported");
28412 if (copy_reg)
28414 if (copy_off != 0)
28415 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
28416 else
28417 emit_move_insn (copy_reg, stack_reg);
28420 if (size > 32767)
28422 /* Need a note here so that try_split doesn't get confused. */
28423 if (get_last_insn () == NULL_RTX)
28424 emit_note (NOTE_INSN_DELETED);
28425 insn = emit_move_insn (tmp_reg, todec);
28426 try_split (PATTERN (insn), insn, 0);
28427 todec = tmp_reg;
28430 insn = emit_insn (TARGET_32BIT
28431 ? gen_movsi_update_stack (stack_reg, stack_reg,
28432 todec, stack_reg)
28433 : gen_movdi_di_update_stack (stack_reg, stack_reg,
28434 todec, stack_reg));
28435 /* Since we didn't use gen_frame_mem to generate the MEM, grab
28436 it now and set the alias set/attributes. The above gen_*_update
28437 calls will generate a PARALLEL with the MEM set being the first
28438 operation. */
28439 par = PATTERN (insn);
28440 gcc_assert (GET_CODE (par) == PARALLEL);
28441 set = XVECEXP (par, 0, 0);
28442 gcc_assert (GET_CODE (set) == SET);
28443 mem = SET_DEST (set);
28444 gcc_assert (MEM_P (mem));
28445 MEM_NOTRAP_P (mem) = 1;
28446 set_mem_alias_set (mem, get_frame_alias_set ());
28448 RTX_FRAME_RELATED_P (insn) = 1;
28449 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
28450 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
28451 GEN_INT (-size))));
28452 return insn;
28455 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
28457 #if PROBE_INTERVAL > 32768
28458 #error Cannot use indexed addressing mode for stack probing
28459 #endif
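/* With the default STACK_CHECK_PROBE_INTERVAL_EXP of 12 this probes
   every 4 kB, well under the 32 kB displacement limit enforced
   above.  */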
28461 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
28462 inclusive. These are offsets from the current stack pointer. */
28464 static void
28465 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
28467 /* See if we have a constant small number of probes to generate. If so,
28468 that's the easy case. */
28469 if (first + size <= 32768)
28471 HOST_WIDE_INT i;
28473 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
28474 it exceeds SIZE. If only one probe is needed, this will not
28475 generate any code. Then probe at FIRST + SIZE. */
28476 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
28477 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
28478 -(first + i)));
28480 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
28481 -(first + size)));
28484 /* Otherwise, do the same as above, but in a loop. Note that we must be
28485 extra careful with variables wrapping around because we might be at
28486 the very top (or the very bottom) of the address space and we have
28487 to be able to handle this case properly; in particular, we use an
28488 equality test for the loop condition. */
28489 else
28491 HOST_WIDE_INT rounded_size;
28492 rtx r12 = gen_rtx_REG (Pmode, 12);
28493 rtx r0 = gen_rtx_REG (Pmode, 0);
28495 /* Sanity check for the addressing mode we're going to use. */
28496 gcc_assert (first <= 32768);
28498 /* Step 1: round SIZE to the previous multiple of the interval. */
28500 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
28503 /* Step 2: compute initial and final value of the loop counter. */
28505 /* TEST_ADDR = SP + FIRST. */
28506 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
28507 -first)));
28509 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
28510 if (rounded_size > 32768)
28512 emit_move_insn (r0, GEN_INT (-rounded_size));
28513 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
28515 else
28516 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
28517 -rounded_size)));
28520 /* Step 3: the loop
28524 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
28525 probe at TEST_ADDR
28527 while (TEST_ADDR != LAST_ADDR)
28529 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
28530 until it is equal to ROUNDED_SIZE. */
28532 if (TARGET_64BIT)
28533 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
28534 else
28535 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
28538 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
28539 that SIZE is equal to ROUNDED_SIZE. */
28541 if (size != rounded_size)
28542 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
28546 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
28547 absolute addresses. */
28549 const char *
28550 output_probe_stack_range (rtx reg1, rtx reg2)
28552 static int labelno = 0;
28553 char loop_lab[32];
28554 rtx xops[2];
28556 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
28558 /* Loop. */
28559 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
28561 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
28562 xops[0] = reg1;
28563 xops[1] = GEN_INT (-PROBE_INTERVAL);
28564 output_asm_insn ("addi %0,%0,%1", xops);
28566 /* Probe at TEST_ADDR. */
28567 xops[1] = gen_rtx_REG (Pmode, 0);
28568 output_asm_insn ("stw %1,0(%0)", xops);
28570 /* Test if TEST_ADDR == LAST_ADDR. */
28571 xops[1] = reg2;
28572 if (TARGET_64BIT)
28573 output_asm_insn ("cmpd 0,%0,%1", xops);
28574 else
28575 output_asm_insn ("cmpw 0,%0,%1", xops);
28577 /* Branch. */
28578 fputs ("\tbne 0,", asm_out_file);
28579 assemble_name_raw (asm_out_file, loop_lab);
28580 fputc ('\n', asm_out_file);
28582 return "";
28585 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
28586 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
28587 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
28588 deduce these equivalences by itself so it wasn't necessary to hold
28589 its hand so much. Don't be tempted to always supply d2_f_d_e with
28590 the actual cfa register, ie. r31 when we are using a hard frame
28591 pointer. That fails when saving regs off r1, and sched moves the
28592 r31 setup past the reg saves. */
28594 static rtx_insn *
28595 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
28596 rtx reg2, rtx repl2)
28598 rtx repl;
28600 if (REGNO (reg) == STACK_POINTER_REGNUM)
28602 gcc_checking_assert (val == 0);
28603 repl = NULL_RTX;
28605 else
28606 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
28607 GEN_INT (val));
28609 rtx pat = PATTERN (insn);
28610 if (!repl && !reg2)
28612 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
28613 if (GET_CODE (pat) == PARALLEL)
28614 for (int i = 0; i < XVECLEN (pat, 0); i++)
28615 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
28617 rtx set = XVECEXP (pat, 0, i);
28619 /* If this PARALLEL has been emitted for out-of-line
28620 register save functions, or store multiple, then omit
28621 eh_frame info for any user-defined global regs. If
28622 eh_frame info is supplied, frame unwinding will
28623 restore a user reg. */
28624 if (!REG_P (SET_SRC (set))
28625 || !fixed_reg_p (REGNO (SET_SRC (set))))
28626 RTX_FRAME_RELATED_P (set) = 1;
28628 RTX_FRAME_RELATED_P (insn) = 1;
28629 return insn;
28632 /* We expect that 'pat' is either a SET or a PARALLEL containing
28633 SETs (and possibly other stuff). In a PARALLEL, all the SETs
28634 are important so they all have to be marked RTX_FRAME_RELATED_P.
28635 Call simplify_replace_rtx on the SETs rather than the whole insn
28636 so as to leave the other stuff alone (for example USE of r12). */
28638 set_used_flags (pat);
28639 if (GET_CODE (pat) == SET)
28641 if (repl)
28642 pat = simplify_replace_rtx (pat, reg, repl);
28643 if (reg2)
28644 pat = simplify_replace_rtx (pat, reg2, repl2);
28646 else if (GET_CODE (pat) == PARALLEL)
28648 pat = shallow_copy_rtx (pat);
28649 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
28651 for (int i = 0; i < XVECLEN (pat, 0); i++)
28652 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
28654 rtx set = XVECEXP (pat, 0, i);
28656 if (repl)
28657 set = simplify_replace_rtx (set, reg, repl);
28658 if (reg2)
28659 set = simplify_replace_rtx (set, reg2, repl2);
28660 XVECEXP (pat, 0, i) = set;
28662 /* Omit eh_frame info for any user-defined global regs. */
28663 if (!REG_P (SET_SRC (set))
28664 || !fixed_reg_p (REGNO (SET_SRC (set))))
28665 RTX_FRAME_RELATED_P (set) = 1;
28668 else
28669 gcc_unreachable ();
28671 RTX_FRAME_RELATED_P (insn) = 1;
28672 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
28674 return insn;
28677 /* Returns an insn that has a vrsave set operation with the
28678 appropriate CLOBBERs. */
28680 static rtx
28681 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
28683 int nclobs, i;
28684 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
28685 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
28687 clobs[0]
28688 = gen_rtx_SET (vrsave,
28689 gen_rtx_UNSPEC_VOLATILE (SImode,
28690 gen_rtvec (2, reg, vrsave),
28691 UNSPECV_SET_VRSAVE));
28693 nclobs = 1;
28695 /* We need to clobber the registers in the mask so the scheduler
28696 does not move sets to VRSAVE before sets of AltiVec registers.
28698 However, if the function receives nonlocal gotos, reload will set
28699 all call saved registers live. We will end up with:
28701 (set (reg 999) (mem))
28702 (parallel [ (set (reg vrsave) (unspec blah))
28703 (clobber (reg 999))])
28705 The clobber will cause the store into reg 999 to be dead, and
28706 flow will attempt to delete an epilogue insn. In this case, we
28707 need an unspec use/set of the register. */
28709 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
28710 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28712 if (!epiloguep || call_used_regs [i])
28713 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
28714 gen_rtx_REG (V4SImode, i));
28715 else
28717 rtx reg = gen_rtx_REG (V4SImode, i);
28719 clobs[nclobs++]
28720 = gen_rtx_SET (reg,
28721 gen_rtx_UNSPEC (V4SImode,
28722 gen_rtvec (1, reg), 27));
28726 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
28728 for (i = 0; i < nclobs; ++i)
28729 XVECEXP (insn, 0, i) = clobs[i];
28731 return insn;
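/* Construct a SET moving REG to or from the frame slot at
   [FRAME_REG + OFFSET]; STORE selects the direction.  The two
   wrappers below fix the direction for loads and stores.  */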
28734 static rtx
28735 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
28737 rtx addr, mem;
28739 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
28740 mem = gen_frame_mem (GET_MODE (reg), addr);
28741 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
28744 static rtx
28745 gen_frame_load (rtx reg, rtx frame_reg, int offset)
28747 return gen_frame_set (reg, frame_reg, offset, false);
28750 static rtx
28751 gen_frame_store (rtx reg, rtx frame_reg, int offset)
28753 return gen_frame_set (reg, frame_reg, offset, true);
28756 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
28757 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
28759 static rtx_insn *
28760 emit_frame_save (rtx frame_reg, machine_mode mode,
28761 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
28763 rtx reg;
28765 /* Certain modes would need register-indexed addressing; assert that none of those cases is present here. */
28766 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
28767 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
28768 || (TARGET_E500_DOUBLE && mode == DFmode)
28769 || (TARGET_SPE_ABI
28770 && SPE_VECTOR_MODE (mode)
28771 && !SPE_CONST_OFFSET_OK (offset))));
28773 reg = gen_rtx_REG (mode, regno);
28774 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
28775 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
28776 NULL_RTX, NULL_RTX);
28779 /* Emit an offset memory reference suitable for a frame store, while
28780 converting to a valid addressing mode. */
28782 static rtx
28783 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
28785 rtx int_rtx, offset_rtx;
28787 int_rtx = GEN_INT (offset);
28789 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
28790 || (TARGET_E500_DOUBLE && mode == DFmode))
28792 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
28793 emit_move_insn (offset_rtx, int_rtx);
28795 else
28796 offset_rtx = int_rtx;
28798 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
28801 #ifndef TARGET_FIX_AND_CONTINUE
28802 #define TARGET_FIX_AND_CONTINUE 0
28803 #endif
28805 /* The first out-of-line-saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
28806 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
28807 #define LAST_SAVRES_REGISTER 31
28808 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
28810 enum {
28811 SAVRES_LR = 0x1,
28812 SAVRES_SAVE = 0x2,
28813 SAVRES_REG = 0x0c,
28814 SAVRES_GPR = 0,
28815 SAVRES_FPR = 4,
28816 SAVRES_VR = 8
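/* E.g. (SAVRES_SAVE | SAVRES_FPR | SAVRES_LR) selects the out-of-line
   FPR save routine that also saves the link register.  */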
28819 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
28821 /* Temporary holding space for an out-of-line register save/restore
28822 routine name. */
28823 static char savres_routine_name[30];
28825 /* Return the name for an out-of-line register save/restore routine.
28826 SEL selects the register class and the save/restore variant via the SAVRES_* flags. */
28828 static char *
28829 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
28831 const char *prefix = "";
28832 const char *suffix = "";
28834 /* Different targets are supposed to define
28835 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
28836 routine name could be defined with:
28838 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
28840 This is a nice idea in theory, but in reality, things are
28841 complicated in several ways:
28843 - ELF targets have save/restore routines for GPRs.
28845 - SPE targets use different prefixes for 32/64-bit registers, and
28846 neither of them fits neatly in the FOO_{PREFIX,SUFFIX} regimen.
28848 - PPC64 ELF targets have routines for save/restore of GPRs that
28849 differ in what they do with the link register, so having a set
28850 prefix doesn't work. (We only use one of the save routines at
28851 the moment, though.)
28853 - PPC32 elf targets have "exit" versions of the restore routines
28854 that restore the link register and can save some extra space.
28855 These require an extra suffix. (There are also "tail" versions
28856 of the restore routines and "GOT" versions of the save routines,
28857 but we don't generate those at present. Same problems apply,
28858 though.)
28860 We deal with all this by synthesizing our own prefix/suffix and
28861 using that for the simple sprintf call shown above. */
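/* For example, on 64-bit ELF the GPR save routine that starts at r29
   and also saves LR is named "_savegpr0_29", while the 32-bit SysV
   "exit" FPR restore starting at f14 comes out as "_restfpr_14_x".  */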
28862 if (TARGET_SPE)
28864 /* No floating point saves on the SPE. */
28865 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
28867 if ((sel & SAVRES_SAVE))
28868 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
28869 else
28870 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
28872 if ((sel & SAVRES_LR))
28873 suffix = "_x";
28875 else if (DEFAULT_ABI == ABI_V4)
28877 if (TARGET_64BIT)
28878 goto aix_names;
28880 if ((sel & SAVRES_REG) == SAVRES_GPR)
28881 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
28882 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28883 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
28884 else if ((sel & SAVRES_REG) == SAVRES_VR)
28885 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28886 else
28887 abort ();
28889 if ((sel & SAVRES_LR))
28890 suffix = "_x";
28892 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28894 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
28895 /* No out-of-line save/restore routines for GPRs on AIX. */
28896 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
28897 #endif
28899 aix_names:
28900 if ((sel & SAVRES_REG) == SAVRES_GPR)
28901 prefix = ((sel & SAVRES_SAVE)
28902 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
28903 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
28904 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28906 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
28907 if ((sel & SAVRES_LR))
28908 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
28909 else
28910 #endif
28912 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
28913 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
28916 else if ((sel & SAVRES_REG) == SAVRES_VR)
28917 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28918 else
28919 abort ();
28922 if (DEFAULT_ABI == ABI_DARWIN)
28924 /* The Darwin approach is (slightly) different, in order to be
28925 compatible with code generated by the system toolchain. There is a
28926 single symbol for the start of the save sequence, and the code here
28927 embeds an offset into that code on the basis of the first register
28928 to be saved. */
28929 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
28930 if ((sel & SAVRES_REG) == SAVRES_GPR)
28931 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
28932 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
28933 (regno - 13) * 4, prefix, regno);
28934 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28935 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
28936 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
28937 else if ((sel & SAVRES_REG) == SAVRES_VR)
28938 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
28939 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
28940 else
28941 abort ();
28943 else
28944 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
28946 return savres_routine_name;
28949 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
28950 SEL says whether we are saving or restoring, which register class
28951 (GPR, FPR, or VR) is involved, and whether LR is handled. */
28952 static rtx
28953 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
28955 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
28956 ? info->first_gp_reg_save
28957 : (sel & SAVRES_REG) == SAVRES_FPR
28958 ? info->first_fp_reg_save - 32
28959 : (sel & SAVRES_REG) == SAVRES_VR
28960 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
28961 : -1);
28962 rtx sym;
28963 int select = sel;
28965 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
28966 versions of the gpr routines. */
28967 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
28968 && info->spe_64bit_regs_used)
28969 select ^= SAVRES_FPR ^ SAVRES_GPR;
28971 /* Don't generate bogus routine names. */
28972 gcc_assert (FIRST_SAVRES_REGISTER <= regno
28973 && regno <= LAST_SAVRES_REGISTER
28974 && select >= 0 && select <= 12);
28976 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
28978 if (sym == NULL)
28980 char *name;
28982 name = rs6000_savres_routine_name (info, regno, sel);
28984 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
28985 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
28986 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
28989 return sym;
28992 /* Emit a sequence of insns, including a stack tie if needed, for
28993 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
28994 reset the stack pointer, but move the base of the frame into
28995 reg UPDT_REGNO for use by out-of-line register restore routines. */
28997 static rtx
28998 rs6000_emit_stack_reset (rs6000_stack_t *info,
28999 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
29000 unsigned updt_regno)
29002 /* If there is nothing to do, don't do anything. */
29003 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
29004 return NULL_RTX;
29006 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
29008 /* This blockage is needed so that sched doesn't decide to move
29009 the sp change before the register restores. */
29010 if (DEFAULT_ABI == ABI_V4
29011 || (TARGET_SPE_ABI
29012 && info->spe_64bit_regs_used != 0
29013 && info->first_gp_reg_save != 32))
29014 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
29015 GEN_INT (frame_off)));
29017 /* If we are restoring registers out-of-line, we will be using the
29018 "exit" variants of the restore routines, which will reset the
29019 stack for us. But we do need to point updt_reg into the
29020 right place for those routines. */
29021 if (frame_off != 0)
29022 return emit_insn (gen_add3_insn (updt_reg_rtx,
29023 frame_reg_rtx, GEN_INT (frame_off)));
29024 else
29025 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
29027 return NULL_RTX;
29030 /* Return the register number used as a pointer by out-of-line
29031 save/restore functions. */
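/* (On AIX/ELFv2 this is r1 for the FPR routines and the LR-handling
   variants, r12 otherwise; on Darwin, r1 for the FPR routines; r11 in
   all remaining cases, including all of 32-bit ELF.)  */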
29033 static inline unsigned
29034 ptr_regno_for_savres (int sel)
29036 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29037 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
29038 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
29041 /* Construct a parallel rtx describing the effect of a call to an
29042 out-of-line register save/restore routine, and emit the insn
29043 or jump_insn as appropriate. */
29045 static rtx_insn *
29046 rs6000_emit_savres_rtx (rs6000_stack_t *info,
29047 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
29048 machine_mode reg_mode, int sel)
29050 int i;
29051 int offset, start_reg, end_reg, n_regs, use_reg;
29052 int reg_size = GET_MODE_SIZE (reg_mode);
29053 rtx sym;
29054 rtvec p;
29055 rtx par;
29056 rtx_insn *insn;
29058 offset = 0;
29059 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
29060 ? info->first_gp_reg_save
29061 : (sel & SAVRES_REG) == SAVRES_FPR
29062 ? info->first_fp_reg_save
29063 : (sel & SAVRES_REG) == SAVRES_VR
29064 ? info->first_altivec_reg_save
29065 : -1);
29066 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
29067 ? 32
29068 : (sel & SAVRES_REG) == SAVRES_FPR
29069 ? 64
29070 : (sel & SAVRES_REG) == SAVRES_VR
29071 ? LAST_ALTIVEC_REGNO + 1
29072 : -1);
29073 n_regs = end_reg - start_reg;
29074 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
29075 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
29076 + n_regs);
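  /* The PARALLEL built below contains: an optional (return) for the
     "exit" restore variants, a clobber of LR, a use of the routine's
     symbol, a use of the pointer register (for VRs, a clobber of it
     plus a use of r0), one set per register moved, and an optional
     store of r0 (which holds LR) for the save-with-LR variants.  */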
29078 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29079 RTVEC_ELT (p, offset++) = ret_rtx;
29081 RTVEC_ELT (p, offset++)
29082 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29084 sym = rs6000_savres_routine_sym (info, sel);
29085 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
29087 use_reg = ptr_regno_for_savres (sel);
29088 if ((sel & SAVRES_REG) == SAVRES_VR)
29090 /* Vector regs are saved/restored using [reg+reg] addressing. */
29091 RTVEC_ELT (p, offset++)
29092 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
29093 RTVEC_ELT (p, offset++)
29094 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
29096 else
29097 RTVEC_ELT (p, offset++)
29098 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
29100 for (i = 0; i < end_reg - start_reg; i++)
29101 RTVEC_ELT (p, i + offset)
29102 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
29103 frame_reg_rtx, save_area_offset + reg_size * i,
29104 (sel & SAVRES_SAVE) != 0);
29106 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29107 RTVEC_ELT (p, i + offset)
29108 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
29110 par = gen_rtx_PARALLEL (VOIDmode, p);
29112 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29114 insn = emit_jump_insn (par);
29115 JUMP_LABEL (insn) = ret_rtx;
29117 else
29118 insn = emit_insn (par);
29119 return insn;
29122 /* Emit code to copy into REG the CR fields that need to be saved. */
29124 static void
29125 rs6000_emit_move_from_cr (rtx reg)
29127 /* Only the ELFv2 ABI allows storing only selected fields. */
29128 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
29130 int i, cr_reg[8], count = 0;
29132 /* Collect CR fields that must be saved. */
29133 for (i = 0; i < 8; i++)
29134 if (save_reg_p (CR0_REGNO + i))
29135 cr_reg[count++] = i;
29137 /* If it's just a single one, use mfcrf. */
29138 if (count == 1)
29140 rtvec p = rtvec_alloc (1);
29141 rtvec r = rtvec_alloc (2);
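	  /* The mask operand follows mfcrf's one-hot FXM encoding,
	     in which CR field N is selected by bit 7-N.  */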
29142 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
29143 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
29144 RTVEC_ELT (p, 0)
29145 = gen_rtx_SET (reg,
29146 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
29148 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29149 return;
29152 /* ??? It might be better to handle the count == 2 or 3 cases here
29153 as well, using logical operations to combine the values. */
29156 emit_insn (gen_movesi_from_cr (reg));
29159 /* Return whether the split-stack arg pointer (r12) is used. */
29161 static bool
29162 split_stack_arg_pointer_used_p (void)
29164 /* If the pseudo holding the arg pointer is no longer a pseudo,
29165 then the arg pointer is used. */
29166 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
29167 && (!REG_P (cfun->machine->split_stack_arg_pointer)
29168 || (REGNO (cfun->machine->split_stack_arg_pointer)
29169 < FIRST_PSEUDO_REGISTER)))
29170 return true;
29172 /* Unfortunately we also need to do some code scanning, since
29173 r12 may have been substituted for the pseudo. */
29174 rtx_insn *insn;
29175 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
29176 FOR_BB_INSNS (bb, insn)
29177 if (NONDEBUG_INSN_P (insn))
29179 /* A call destroys r12. */
29180 if (CALL_P (insn))
29181 return false;
29183 df_ref use;
29184 FOR_EACH_INSN_USE (use, insn)
29186 rtx x = DF_REF_REG (use);
29187 if (REG_P (x) && REGNO (x) == 12)
29188 return true;
29190 df_ref def;
29191 FOR_EACH_INSN_DEF (def, insn)
29193 rtx x = DF_REF_REG (def);
29194 if (REG_P (x) && REGNO (x) == 12)
29195 return false;
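  /* Neither a def nor a use of r12 was seen in the block: fall back
     on whether r12 is live on exit from it.  */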
29198 return bitmap_bit_p (DF_LR_OUT (bb), 12);
29201 /* Return whether we need to emit an ELFv2 global entry point prologue. */
29203 static bool
29204 rs6000_global_entry_point_needed_p (void)
29206 /* Only needed for the ELFv2 ABI. */
29207 if (DEFAULT_ABI != ABI_ELFv2)
29208 return false;
29210 /* With -msingle-pic-base, we assume the whole program shares the same
29211 TOC, so no global entry point prologues are needed anywhere. */
29212 if (TARGET_SINGLE_PIC_BASE)
29213 return false;
29215 /* Ensure we have a global entry point for thunks. ??? We could
29216 avoid that if the target routine doesn't need a global entry point,
29217 but we cannot know whether that is the case here. */
29218 if (cfun->is_thunk)
29219 return true;
29221 /* For regular functions, rs6000_emit_prologue sets this flag if the
29222 routine ever uses the TOC pointer. */
29223 return cfun->machine->r2_setup_needed;
29226 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
29227 static sbitmap
29228 rs6000_get_separate_components (void)
29230 rs6000_stack_t *info = rs6000_stack_info ();
29232 if (WORLD_SAVE_P (info))
29233 return NULL;
29235 if (TARGET_SPE_ABI)
29236 return NULL;
29238 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
29239 && !(info->savres_strategy & REST_MULTIPLE));
29241 /* Component 0 is the save/restore of LR (done via GPR0).
29242 Components 13..31 are the save/restore of GPR13..GPR31.
29243 Components 46..63 are the save/restore of FPR14..FPR31. */
29245 cfun->machine->n_components = 64;
29247 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
29248 bitmap_clear (components);
29250 int reg_size = TARGET_32BIT ? 4 : 8;
29251 int fp_reg_size = 8;
29253 /* The GPRs we need saved to the frame. */
29254 if ((info->savres_strategy & SAVE_INLINE_GPRS)
29255 && (info->savres_strategy & REST_INLINE_GPRS))
29257 int offset = info->gp_save_offset;
29258 if (info->push_p)
29259 offset += info->total_size;
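      /* Separate wrapping requires each save slot to be addressable
	 with a 16-bit signed offset from the frame base.  */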
29261 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
29263 if (IN_RANGE (offset, -0x8000, 0x7fff)
29264 && rs6000_reg_live_or_pic_offset_p (regno))
29265 bitmap_set_bit (components, regno);
29267 offset += reg_size;
29271 /* Don't mess with the hard frame pointer. */
29272 if (frame_pointer_needed)
29273 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
29275 /* Don't mess with the fixed TOC register. */
29276 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
29277 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
29278 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
29279 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
29281 /* The FPRs we need saved to the frame. */
29282 if ((info->savres_strategy & SAVE_INLINE_FPRS)
29283 && (info->savres_strategy & REST_INLINE_FPRS))
29285 int offset = info->fp_save_offset;
29286 if (info->push_p)
29287 offset += info->total_size;
29289 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
29291 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
29292 bitmap_set_bit (components, regno);
29294 offset += fp_reg_size;
29298 /* Optimize LR save and restore if we can. This is component 0. Any
29299 out-of-line register save/restore routines need LR. */
29300 if (info->lr_save_p
29301 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
29302 && (info->savres_strategy & SAVE_INLINE_GPRS)
29303 && (info->savres_strategy & REST_INLINE_GPRS)
29304 && (info->savres_strategy & SAVE_INLINE_FPRS)
29305 && (info->savres_strategy & REST_INLINE_FPRS)
29306 && (info->savres_strategy & SAVE_INLINE_VRS)
29307 && (info->savres_strategy & REST_INLINE_VRS))
29309 int offset = info->lr_save_offset;
29310 if (info->push_p)
29311 offset += info->total_size;
29312 if (IN_RANGE (offset, -0x8000, 0x7fff))
29313 bitmap_set_bit (components, 0);
29316 return components;
29319 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
29320 static sbitmap
29321 rs6000_components_for_bb (basic_block bb)
29323 rs6000_stack_t *info = rs6000_stack_info ();
29325 bitmap in = DF_LIVE_IN (bb);
29326 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
29327 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
29329 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
29330 bitmap_clear (components);
29332 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
29334 /* GPRs. */
29335 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
29336 if (bitmap_bit_p (in, regno)
29337 || bitmap_bit_p (gen, regno)
29338 || bitmap_bit_p (kill, regno))
29339 bitmap_set_bit (components, regno);
29341 /* FPRs. */
29342 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
29343 if (bitmap_bit_p (in, regno)
29344 || bitmap_bit_p (gen, regno)
29345 || bitmap_bit_p (kill, regno))
29346 bitmap_set_bit (components, regno);
29348 /* The link register. */
29349 if (bitmap_bit_p (in, LR_REGNO)
29350 || bitmap_bit_p (gen, LR_REGNO)
29351 || bitmap_bit_p (kill, LR_REGNO))
29352 bitmap_set_bit (components, 0);
29354 return components;
29357 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
29358 static void
29359 rs6000_disqualify_components (sbitmap components, edge e,
29360 sbitmap edge_components, bool /*is_prologue*/)
29362 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
29363 live where we want to place that code. */
29364 if (bitmap_bit_p (edge_components, 0)
29365 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
29367 if (dump_file)
29368 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
29369 "on entry to bb %d\n", e->dest->index);
29370 bitmap_clear_bit (components, 0);
29374 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
29375 static void
29376 rs6000_emit_prologue_components (sbitmap components)
29378 rs6000_stack_t *info = rs6000_stack_info ();
29379 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
29380 ? HARD_FRAME_POINTER_REGNUM
29381 : STACK_POINTER_REGNUM);
29383 machine_mode reg_mode = Pmode;
29384 int reg_size = TARGET_32BIT ? 4 : 8;
29385 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29386 ? DFmode : SFmode;
29387 int fp_reg_size = 8;
29389 /* Prologue for LR. */
29390 if (bitmap_bit_p (components, 0))
29392 rtx reg = gen_rtx_REG (reg_mode, 0);
29393 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
29394 RTX_FRAME_RELATED_P (insn) = 1;
29395 add_reg_note (insn, REG_CFA_REGISTER, NULL);
29397 int offset = info->lr_save_offset;
29398 if (info->push_p)
29399 offset += info->total_size;
29401 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29402 RTX_FRAME_RELATED_P (insn) = 1;
29403 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
29404 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
29405 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
29408 /* Prologue for the GPRs. */
29409 int offset = info->gp_save_offset;
29410 if (info->push_p)
29411 offset += info->total_size;
29413 for (int i = info->first_gp_reg_save; i < 32; i++)
29415 if (bitmap_bit_p (components, i))
29417 rtx reg = gen_rtx_REG (reg_mode, i);
29418 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29419 RTX_FRAME_RELATED_P (insn) = 1;
29420 rtx set = copy_rtx (single_set (insn));
29421 add_reg_note (insn, REG_CFA_OFFSET, set);
29424 offset += reg_size;
29427 /* Prologue for the FPRs. */
29428 offset = info->fp_save_offset;
29429 if (info->push_p)
29430 offset += info->total_size;
29432 for (int i = info->first_fp_reg_save; i < 64; i++)
29434 if (bitmap_bit_p (components, i))
29436 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29437 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29438 RTX_FRAME_RELATED_P (insn) = 1;
29439 rtx set = copy_rtx (single_set (insn));
29440 add_reg_note (insn, REG_CFA_OFFSET, set);
29443 offset += fp_reg_size;
29447 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
29448 static void
29449 rs6000_emit_epilogue_components (sbitmap components)
29451 rs6000_stack_t *info = rs6000_stack_info ();
29452 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
29453 ? HARD_FRAME_POINTER_REGNUM
29454 : STACK_POINTER_REGNUM);
29456 machine_mode reg_mode = Pmode;
29457 int reg_size = TARGET_32BIT ? 4 : 8;
29459 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29460 ? DFmode : SFmode;
29461 int fp_reg_size = 8;
29463 /* Epilogue for the FPRs. */
29464 int offset = info->fp_save_offset;
29465 if (info->push_p)
29466 offset += info->total_size;
29468 for (int i = info->first_fp_reg_save; i < 64; i++)
29470 if (bitmap_bit_p (components, i))
29472 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29473 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29474 RTX_FRAME_RELATED_P (insn) = 1;
29475 add_reg_note (insn, REG_CFA_RESTORE, reg);
29478 offset += fp_reg_size;
29481 /* Epilogue for the GPRs. */
29482 offset = info->gp_save_offset;
29483 if (info->push_p)
29484 offset += info->total_size;
29486 for (int i = info->first_gp_reg_save; i < 32; i++)
29488 if (bitmap_bit_p (components, i))
29490 rtx reg = gen_rtx_REG (reg_mode, i);
29491 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29492 RTX_FRAME_RELATED_P (insn) = 1;
29493 add_reg_note (insn, REG_CFA_RESTORE, reg);
29496 offset += reg_size;
29499 /* Epilogue for LR. */
29500 if (bitmap_bit_p (components, 0))
29502 int offset = info->lr_save_offset;
29503 if (info->push_p)
29504 offset += info->total_size;
29506 rtx reg = gen_rtx_REG (reg_mode, 0);
29507 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29509 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
29510 insn = emit_move_insn (lr, reg);
29511 RTX_FRAME_RELATED_P (insn) = 1;
29512 add_reg_note (insn, REG_CFA_RESTORE, lr);
29516 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
29517 static void
29518 rs6000_set_handled_components (sbitmap components)
29520 rs6000_stack_t *info = rs6000_stack_info ();
29522 for (int i = info->first_gp_reg_save; i < 32; i++)
29523 if (bitmap_bit_p (components, i))
29524 cfun->machine->gpr_is_wrapped_separately[i] = true;
29526 for (int i = info->first_fp_reg_save; i < 64; i++)
29527 if (bitmap_bit_p (components, i))
29528 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
29530 if (bitmap_bit_p (components, 0))
29531 cfun->machine->lr_is_wrapped_separately = true;
29534 /* Emit function prologue as insns. */
29536 void
29537 rs6000_emit_prologue (void)
29539 rs6000_stack_t *info = rs6000_stack_info ();
29540 machine_mode reg_mode = Pmode;
29541 int reg_size = TARGET_32BIT ? 4 : 8;
29542 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29543 ? DFmode : SFmode;
29544 int fp_reg_size = 8;
29545 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29546 rtx frame_reg_rtx = sp_reg_rtx;
29547 unsigned int cr_save_regno;
29548 rtx cr_save_rtx = NULL_RTX;
29549 rtx_insn *insn;
29550 int strategy;
29551 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
29552 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
29553 && call_used_regs[STATIC_CHAIN_REGNUM]);
29554 int using_split_stack = (flag_split_stack
29555 && (lookup_attribute ("no_split_stack",
29556 DECL_ATTRIBUTES (cfun->decl))
29557 == NULL));
29559 /* Offset to top of frame for frame_reg and sp respectively. */
29560 HOST_WIDE_INT frame_off = 0;
29561 HOST_WIDE_INT sp_off = 0;
29562 /* sp_adjust is the stack adjusting instruction, tracked so that the
29563 insn setting up the split-stack arg pointer can be emitted just
29564 prior to it, when r12 is not used here for other purposes. */
29565 rtx_insn *sp_adjust = 0;
29567 #if CHECKING_P
29568 /* Track and check usage of r0, r11, r12. */
29569 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
29570 #define START_USE(R) do \
29572 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
29573 reg_inuse |= 1 << (R); \
29574 } while (0)
29575 #define END_USE(R) do \
29577 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
29578 reg_inuse &= ~(1 << (R)); \
29579 } while (0)
29580 #define NOT_INUSE(R) do \
29582 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
29583 } while (0)
29584 #else
29585 #define START_USE(R) do {} while (0)
29586 #define END_USE(R) do {} while (0)
29587 #define NOT_INUSE(R) do {} while (0)
29588 #endif
29590 if (DEFAULT_ABI == ABI_ELFv2
29591 && !TARGET_SINGLE_PIC_BASE)
29593 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
29595 /* With -mminimal-toc we may generate an extra use of r2 below. */
29596 if (TARGET_TOC && TARGET_MINIMAL_TOC
29597 && !constant_pool_empty_p ())
29598 cfun->machine->r2_setup_needed = true;
29602 if (flag_stack_usage_info)
29603 current_function_static_stack_size = info->total_size;
29605 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
29607 HOST_WIDE_INT size = info->total_size;
29609 if (crtl->is_leaf && !cfun->calls_alloca)
29611 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
29612 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
29613 size - STACK_CHECK_PROTECT);
29615 else if (size > 0)
29616 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
29619 if (TARGET_FIX_AND_CONTINUE)
29621 /* gdb on darwin arranges to forward a function from the old
29622 address by modifying the first 5 instructions of the function
29623 to branch to the overriding function. This is necessary to
29624 permit function pointers that point to the old function to
29625 actually forward to the new function. */
29626 emit_insn (gen_nop ());
29627 emit_insn (gen_nop ());
29628 emit_insn (gen_nop ());
29629 emit_insn (gen_nop ());
29630 emit_insn (gen_nop ());
29633 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
29635 reg_mode = V2SImode;
29636 reg_size = 8;
29639 /* Handle world saves specially here. */
29640 if (WORLD_SAVE_P (info))
29642 int i, j, sz;
29643 rtx treg;
29644 rtvec p;
29645 rtx reg0;
29647 /* save_world expects lr in r0. */
29648 reg0 = gen_rtx_REG (Pmode, 0);
29649 if (info->lr_save_p)
29651 insn = emit_move_insn (reg0,
29652 gen_rtx_REG (Pmode, LR_REGNO));
29653 RTX_FRAME_RELATED_P (insn) = 1;
29656 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
29657 assumptions about the offsets of various bits of the stack
29658 frame. */
29659 gcc_assert (info->gp_save_offset == -220
29660 && info->fp_save_offset == -144
29661 && info->lr_save_offset == 8
29662 && info->cr_save_offset == 4
29663 && info->push_p
29664 && info->lr_save_p
29665 && (!crtl->calls_eh_return
29666 || info->ehrd_offset == -432)
29667 && info->vrsave_save_offset == -224
29668 && info->altivec_save_offset == -416);
29670 treg = gen_rtx_REG (SImode, 11);
29671 emit_move_insn (treg, GEN_INT (-info->total_size));
29673 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
29674 in R11. It also clobbers R12, so beware! */
29676 /* Preserve CR2 for save_world prologues */
29677 sz = 5;
29678 sz += 32 - info->first_gp_reg_save;
29679 sz += 64 - info->first_fp_reg_save;
29680 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
29681 p = rtvec_alloc (sz);
29682 j = 0;
29683 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
29684 gen_rtx_REG (SImode,
29685 LR_REGNO));
29686 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
29687 gen_rtx_SYMBOL_REF (Pmode,
29688 "*save_world"));
29689 /* We do floats first so that the instruction pattern matches
29690 properly. */
29691 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29692 RTVEC_ELT (p, j++)
29693 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
29694 ? DFmode : SFmode,
29695 info->first_fp_reg_save + i),
29696 frame_reg_rtx,
29697 info->fp_save_offset + frame_off + 8 * i);
29698 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
29699 RTVEC_ELT (p, j++)
29700 = gen_frame_store (gen_rtx_REG (V4SImode,
29701 info->first_altivec_reg_save + i),
29702 frame_reg_rtx,
29703 info->altivec_save_offset + frame_off + 16 * i);
29704 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29705 RTVEC_ELT (p, j++)
29706 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
29707 frame_reg_rtx,
29708 info->gp_save_offset + frame_off + reg_size * i);
29710 /* CR register traditionally saved as CR2. */
29711 RTVEC_ELT (p, j++)
29712 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
29713 frame_reg_rtx, info->cr_save_offset + frame_off);
29714 /* Explain the use of R0 (it holds the saved LR). */
29715 if (info->lr_save_p)
29716 RTVEC_ELT (p, j++)
29717 = gen_frame_store (reg0,
29718 frame_reg_rtx, info->lr_save_offset + frame_off);
29719 /* Explain what happens to the stack pointer. */
29721 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
29722 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
29725 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29726 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29727 treg, GEN_INT (-info->total_size));
29728 sp_off = frame_off = info->total_size;
29731 strategy = info->savres_strategy;
29733 /* For V.4, update stack before we do any saving and set back pointer. */
29734 if (! WORLD_SAVE_P (info)
29735 && info->push_p
29736 && (DEFAULT_ABI == ABI_V4
29737 || crtl->calls_eh_return))
29739 bool need_r11 = (TARGET_SPE
29740 ? (!(strategy & SAVE_INLINE_GPRS)
29741 && info->spe_64bit_regs_used == 0)
29742 : (!(strategy & SAVE_INLINE_FPRS)
29743 || !(strategy & SAVE_INLINE_GPRS)
29744 || !(strategy & SAVE_INLINE_VRS)));
29745 int ptr_regno = -1;
29746 rtx ptr_reg = NULL_RTX;
29747 int ptr_off = 0;
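      /* If the whole frame fits in a 16-bit signed displacement, every
	 save slot is addressable from the updated stack pointer and no
	 separate frame register is needed.  */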
29749 if (info->total_size < 32767)
29750 frame_off = info->total_size;
29751 else if (need_r11)
29752 ptr_regno = 11;
29753 else if (info->cr_save_p
29754 || info->lr_save_p
29755 || info->first_fp_reg_save < 64
29756 || info->first_gp_reg_save < 32
29757 || info->altivec_size != 0
29758 || info->vrsave_size != 0
29759 || crtl->calls_eh_return)
29760 ptr_regno = 12;
29761 else
29763 /* The prologue won't be saving any regs so there is no need
29764 to set up a frame register to access any frame save area.
29765 We also won't be using frame_off anywhere below, but set
29766 the correct value anyway to protect against future
29767 changes to this function. */
29768 frame_off = info->total_size;
29770 if (ptr_regno != -1)
29772 /* Set up the frame offset to that needed by the first
29773 out-of-line save function. */
29774 START_USE (ptr_regno);
29775 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29776 frame_reg_rtx = ptr_reg;
29777 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
29778 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
29779 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
29780 ptr_off = info->gp_save_offset + info->gp_size;
29781 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
29782 ptr_off = info->altivec_save_offset + info->altivec_size;
29783 frame_off = -ptr_off;
29785 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
29786 ptr_reg, ptr_off);
29787 if (REGNO (frame_reg_rtx) == 12)
29788 sp_adjust = 0;
29789 sp_off = info->total_size;
29790 if (frame_reg_rtx != sp_reg_rtx)
29791 rs6000_emit_stack_tie (frame_reg_rtx, false);
29794 /* If we use the link register, get it into r0. */
29795 if (!WORLD_SAVE_P (info) && info->lr_save_p
29796 && !cfun->machine->lr_is_wrapped_separately)
29798 rtx addr, reg, mem;
29800 reg = gen_rtx_REG (Pmode, 0);
29801 START_USE (0);
29802 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
29803 RTX_FRAME_RELATED_P (insn) = 1;
29805 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
29806 | SAVE_NOINLINE_FPRS_SAVES_LR)))
29808 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
29809 GEN_INT (info->lr_save_offset + frame_off));
29810 mem = gen_rtx_MEM (Pmode, addr);
29811 /* This should not be in rs6000_sr_alias_set, because of
29812 __builtin_return_address. */
29814 insn = emit_move_insn (mem, reg);
29815 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29816 NULL_RTX, NULL_RTX);
29817 END_USE (0);
29821 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
29822 r12 will be needed by out-of-line gpr restore. */
29823 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29824 && !(strategy & (SAVE_INLINE_GPRS
29825 | SAVE_NOINLINE_GPRS_SAVES_LR))
29826 ? 11 : 12);
29827 if (!WORLD_SAVE_P (info)
29828 && info->cr_save_p
29829 && REGNO (frame_reg_rtx) != cr_save_regno
29830 && !(using_static_chain_p && cr_save_regno == 11)
29831 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
29833 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
29834 START_USE (cr_save_regno);
29835 rs6000_emit_move_from_cr (cr_save_rtx);
29838 /* Do any required saving of fpr's. If only one or two to save, do
29839 it ourselves. Otherwise, call function. */
29840 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
29842 int offset = info->fp_save_offset + frame_off;
29843 for (int i = info->first_fp_reg_save; i < 64; i++)
29845 if (save_reg_p (i)
29846 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
29847 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
29848 sp_off - frame_off);
29850 offset += fp_reg_size;
29853 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
29855 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
29856 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
29857 unsigned ptr_regno = ptr_regno_for_savres (sel);
29858 rtx ptr_reg = frame_reg_rtx;
29860 if (REGNO (frame_reg_rtx) == ptr_regno)
29861 gcc_checking_assert (frame_off == 0);
29862 else
29864 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29865 NOT_INUSE (ptr_regno);
29866 emit_insn (gen_add3_insn (ptr_reg,
29867 frame_reg_rtx, GEN_INT (frame_off)));
29869 insn = rs6000_emit_savres_rtx (info, ptr_reg,
29870 info->fp_save_offset,
29871 info->lr_save_offset,
29872 DFmode, sel);
29873 rs6000_frame_related (insn, ptr_reg, sp_off,
29874 NULL_RTX, NULL_RTX);
29875 if (lr)
29876 END_USE (0);
29879 /* Save GPRs. This is done as a PARALLEL if we are using
29880 the store-multiple instructions. */
29881 if (!WORLD_SAVE_P (info)
29882 && TARGET_SPE_ABI
29883 && info->spe_64bit_regs_used != 0
29884 && info->first_gp_reg_save != 32)
29886 int i;
29887 rtx spe_save_area_ptr;
29888 HOST_WIDE_INT save_off;
29889 int ool_adjust = 0;
29891 /* Determine whether we can address all of the registers that need
29892 to be saved with an offset from frame_reg_rtx that fits in
29893 the small const field for SPE memory instructions. */
29894 int spe_regs_addressable
29895 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
29896 + reg_size * (32 - info->first_gp_reg_save - 1))
29897 && (strategy & SAVE_INLINE_GPRS));
29899 if (spe_regs_addressable)
29901 spe_save_area_ptr = frame_reg_rtx;
29902 save_off = frame_off;
29904 else
29906 /* Make r11 point to the start of the SPE save area. We need
29907 to be careful here if r11 is holding the static chain. If
29908 it is, then temporarily save it in r0. */
29909 HOST_WIDE_INT offset;
29911 if (!(strategy & SAVE_INLINE_GPRS))
29912 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
29913 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
29914 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
29915 save_off = frame_off - offset;
29917 if (using_static_chain_p)
29919 rtx r0 = gen_rtx_REG (Pmode, 0);
29921 START_USE (0);
29922 gcc_assert (info->first_gp_reg_save > 11);
29924 emit_move_insn (r0, spe_save_area_ptr);
29926 else if (REGNO (frame_reg_rtx) != 11)
29927 START_USE (11);
29929 emit_insn (gen_addsi3 (spe_save_area_ptr,
29930 frame_reg_rtx, GEN_INT (offset)));
29931 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
29932 frame_off = -info->spe_gp_save_offset + ool_adjust;
29935 if ((strategy & SAVE_INLINE_GPRS))
29937 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29938 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
29939 emit_frame_save (spe_save_area_ptr, reg_mode,
29940 info->first_gp_reg_save + i,
29941 (info->spe_gp_save_offset + save_off
29942 + reg_size * i),
29943 sp_off - save_off);
29945 else
29947 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
29948 info->spe_gp_save_offset + save_off,
29949 0, reg_mode,
29950 SAVRES_SAVE | SAVRES_GPR);
29952 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
29953 NULL_RTX, NULL_RTX);
29956 /* Move the static chain pointer back. */
29957 if (!spe_regs_addressable)
29959 if (using_static_chain_p)
29961 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
29962 END_USE (0);
29964 else if (REGNO (frame_reg_rtx) != 11)
29965 END_USE (11);
29968 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
29970 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
29971 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
29972 unsigned ptr_regno = ptr_regno_for_savres (sel);
29973 rtx ptr_reg = frame_reg_rtx;
29974 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
29975 int end_save = info->gp_save_offset + info->gp_size;
29976 int ptr_off;
29978 if (ptr_regno == 12)
29979 sp_adjust = 0;
29980 if (!ptr_set_up)
29981 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29983 /* Need to adjust r11 (r12) if we saved any FPRs. */
29984 if (end_save + frame_off != 0)
29986 rtx offset = GEN_INT (end_save + frame_off);
29988 if (ptr_set_up)
29989 frame_off = -end_save;
29990 else
29991 NOT_INUSE (ptr_regno);
29992 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
29994 else if (!ptr_set_up)
29996 NOT_INUSE (ptr_regno);
29997 emit_move_insn (ptr_reg, frame_reg_rtx);
29999 ptr_off = -end_save;
30000 insn = rs6000_emit_savres_rtx (info, ptr_reg,
30001 info->gp_save_offset + ptr_off,
30002 info->lr_save_offset + ptr_off,
30003 reg_mode, sel);
30004 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
30005 NULL_RTX, NULL_RTX);
30006 if (lr)
30007 END_USE (0);
30009 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
30011 rtvec p;
30012 int i;
30013 p = rtvec_alloc (32 - info->first_gp_reg_save);
30014 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
30015 RTVEC_ELT (p, i)
30016 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
30017 frame_reg_rtx,
30018 info->gp_save_offset + frame_off + reg_size * i);
30019 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30020 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
30021 NULL_RTX, NULL_RTX);
30023 else if (!WORLD_SAVE_P (info))
30025 int offset = info->gp_save_offset + frame_off;
30026 for (int i = info->first_gp_reg_save; i < 32; i++)
30028 if (rs6000_reg_live_or_pic_offset_p (i)
30029 && !cfun->machine->gpr_is_wrapped_separately[i])
30030 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
30031 sp_off - frame_off);
30033 offset += reg_size;
30037 if (crtl->calls_eh_return)
30039 unsigned int i;
30040 rtvec p;
30042 for (i = 0; ; ++i)
30044 unsigned int regno = EH_RETURN_DATA_REGNO (i);
30045 if (regno == INVALID_REGNUM)
30046 break;
30049 p = rtvec_alloc (i);
30051 for (i = 0; ; ++i)
30053 unsigned int regno = EH_RETURN_DATA_REGNO (i);
30054 if (regno == INVALID_REGNUM)
30055 break;
30057 rtx set
30058 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
30059 sp_reg_rtx,
30060 info->ehrd_offset + sp_off + reg_size * (int) i);
30061 RTVEC_ELT (p, i) = set;
30062 RTX_FRAME_RELATED_P (set) = 1;
30065 insn = emit_insn (gen_blockage ());
30066 RTX_FRAME_RELATED_P (insn) = 1;
30067 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
30070 /* In AIX ABI we need to make sure r2 is really saved. */
30071 if (TARGET_AIX && crtl->calls_eh_return)
30073 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
30074 rtx join_insn, note;
30075 rtx_insn *save_insn;
30076 long toc_restore_insn;
30078 tmp_reg = gen_rtx_REG (Pmode, 11);
30079 tmp_reg_si = gen_rtx_REG (SImode, 11);
30080 if (using_static_chain_p)
30082 START_USE (0);
30083 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
30085 else
30086 START_USE (11);
30087 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
30088 /* Peek at instruction to which this function returns. If it's
30089 restoring r2, then we know we've already saved r2. We can't
30090 unconditionally save r2 because the value we have will already
30091 be updated if we arrived at this function via a plt call or
30092 toc adjusting stub. */
30093 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
30094 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
30095 + RS6000_TOC_SAVE_SLOT);
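      /* 0x80410000 and 0xE8410000 are the encodings of "lwz r2,0(r1)"
	 and "ld r2,0(r1)"; adding RS6000_TOC_SAVE_SLOT produces the
	 image of the r2-restoring insn we are looking for.  */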
30096 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
30097 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
30098 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
30099 validate_condition_mode (EQ, CCUNSmode);
30100 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
30101 emit_insn (gen_rtx_SET (compare_result,
30102 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
30103 toc_save_done = gen_label_rtx ();
30104 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
30105 gen_rtx_EQ (VOIDmode, compare_result,
30106 const0_rtx),
30107 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
30108 pc_rtx);
30109 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
30110 JUMP_LABEL (jump) = toc_save_done;
30111 LABEL_NUSES (toc_save_done) += 1;
30113 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
30114 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
30115 sp_off - frame_off);
30117 emit_label (toc_save_done);
30119 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
30120 have a CFG that has different saves along different paths.
30121 Move the note to a dummy blockage insn, which describes that
30122 R2 is unconditionally saved after the label. */
30123 /* ??? An alternate representation might be a special insn pattern
30124 containing both the branch and the store.  That might give the
30125 code that minimizes the number of DW_CFA_advance opcodes more
30126 freedom in placing the annotations. */
30127 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
30128 if (note)
30129 remove_note (save_insn, note);
30130 else
30131 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
30132 copy_rtx (PATTERN (save_insn)), NULL_RTX);
30133 RTX_FRAME_RELATED_P (save_insn) = 0;
30135 join_insn = emit_insn (gen_blockage ());
30136 REG_NOTES (join_insn) = note;
30137 RTX_FRAME_RELATED_P (join_insn) = 1;
30139 if (using_static_chain_p)
30141 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
30142 END_USE (0);
30144 else
30145 END_USE (11);
30148 /* Save CR if we use any that must be preserved. */
30149 if (!WORLD_SAVE_P (info) && info->cr_save_p)
30151 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
30152 GEN_INT (info->cr_save_offset + frame_off));
30153 rtx mem = gen_frame_mem (SImode, addr);
30155 /* If we didn't copy cr before, do so now using r0. */
30156 if (cr_save_rtx == NULL_RTX)
30158 START_USE (0);
30159 cr_save_rtx = gen_rtx_REG (SImode, 0);
30160 rs6000_emit_move_from_cr (cr_save_rtx);
30163 /* Saving CR requires a two-instruction sequence: one instruction
30164 to move the CR to a general-purpose register, and a second
30165 instruction that stores the GPR to memory.
30167 We do not emit any DWARF CFI records for the first of these,
30168 because we cannot properly represent the fact that CR is saved in
30169 a register. One reason is that we cannot express that multiple
30170 CR fields are saved; another reason is that on 64-bit, the size
30171 of the CR register in DWARF (4 bytes) differs from the size of
30172 a general-purpose register.
30174 This means if any intervening instruction were to clobber one of
30175 the call-saved CR fields, we'd have incorrect CFI. To prevent
30176 this from happening, we mark the store to memory as a use of
30177 those CR fields, which prevents any such instruction from being
30178 scheduled in between the two instructions. */
30179 rtx crsave_v[9];
30180 int n_crsave = 0;
30181 int i;
30183 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
30184 for (i = 0; i < 8; i++)
30185 if (save_reg_p (CR0_REGNO + i))
30186 crsave_v[n_crsave++]
30187 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
30189 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
30190 gen_rtvec_v (n_crsave, crsave_v)));
30191 END_USE (REGNO (cr_save_rtx));
30193 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
30194 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
30195 so we need to construct a frame expression manually. */
30196 RTX_FRAME_RELATED_P (insn) = 1;
30198 /* Update address to be stack-pointer relative, like
30199 rs6000_frame_related would do. */
30200 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
30201 GEN_INT (info->cr_save_offset + sp_off));
30202 mem = gen_frame_mem (SImode, addr);
30204 if (DEFAULT_ABI == ABI_ELFv2)
30206 /* In the ELFv2 ABI we generate separate CFI records for each
30207 CR field that was actually saved. They all point to the
30208 same 32-bit stack slot. */
30209 rtx crframe[8];
30210 int n_crframe = 0;
30212 for (i = 0; i < 8; i++)
30213 if (save_reg_p (CR0_REGNO + i))
30215 crframe[n_crframe]
30216 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
30218 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
30219 n_crframe++;
30222 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
30223 gen_rtx_PARALLEL (VOIDmode,
30224 gen_rtvec_v (n_crframe, crframe)));
30226 else
30228 /* In other ABIs, by convention, we use a single CR regnum to
30229 represent the fact that all call-saved CR fields are saved.
30230 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
30231 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
30232 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
30236 /* In the ELFv2 ABI we need to save all call-saved CR fields into
30237 *separate* slots if the routine calls __builtin_eh_return, so
30238 that they can be independently restored by the unwinder. */
30239 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
30241 int i, cr_off = info->ehcr_offset;
30242 rtx crsave;
30244 /* ??? We might get better performance by using multiple mfocrf
30245 instructions. */
30246 crsave = gen_rtx_REG (SImode, 0);
30247 emit_insn (gen_movesi_from_cr (crsave));
30249 for (i = 0; i < 8; i++)
30250 if (!call_used_regs[CR0_REGNO + i])
30252 rtvec p = rtvec_alloc (2);
30253 RTVEC_ELT (p, 0)
30254 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
30255 RTVEC_ELT (p, 1)
30256 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
30258 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30260 RTX_FRAME_RELATED_P (insn) = 1;
30261 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
30262 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
30263 sp_reg_rtx, cr_off + sp_off));
30265 cr_off += reg_size;
30269 /* Update stack and set back pointer unless this is V.4,
30270 for which it was done previously. */
30271 if (!WORLD_SAVE_P (info) && info->push_p
30272 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
30274 rtx ptr_reg = NULL;
30275 int ptr_off = 0;
30277 /* If saving AltiVec regs we need to be able to address all save
30278 locations using a 16-bit offset. */
30279 if ((strategy & SAVE_INLINE_VRS) == 0
30280 || (info->altivec_size != 0
30281 && (info->altivec_save_offset + info->altivec_size - 16
30282 + info->total_size - frame_off) > 32767)
30283 || (info->vrsave_size != 0
30284 && (info->vrsave_save_offset
30285 + info->total_size - frame_off) > 32767))
30287 int sel = SAVRES_SAVE | SAVRES_VR;
30288 unsigned ptr_regno = ptr_regno_for_savres (sel);
30290 if (using_static_chain_p
30291 && ptr_regno == STATIC_CHAIN_REGNUM)
30292 ptr_regno = 12;
30293 if (REGNO (frame_reg_rtx) != ptr_regno)
30294 START_USE (ptr_regno);
30295 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
30296 frame_reg_rtx = ptr_reg;
30297 ptr_off = info->altivec_save_offset + info->altivec_size;
30298 frame_off = -ptr_off;
30300 else if (REGNO (frame_reg_rtx) == 1)
30301 frame_off = info->total_size;
30302 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
30303 ptr_reg, ptr_off);
30304 if (REGNO (frame_reg_rtx) == 12)
30305 sp_adjust = 0;
30306 sp_off = info->total_size;
30307 if (frame_reg_rtx != sp_reg_rtx)
30308 rs6000_emit_stack_tie (frame_reg_rtx, false);
30311 /* Set frame pointer, if needed. */
30312 if (frame_pointer_needed)
30314 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
30315 sp_reg_rtx);
30316 RTX_FRAME_RELATED_P (insn) = 1;
30319 /* Save AltiVec registers if needed. Save here because the red zone does
30320 not always include AltiVec registers. */
30321 if (!WORLD_SAVE_P (info)
30322 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
30324 int end_save = info->altivec_save_offset + info->altivec_size;
30325 int ptr_off;
30326 /* Oddly, the vector save/restore functions point r0 at the end
30327 of the save area, then use r11 or r12 to load offsets for
30328 [reg+reg] addressing. */
30329 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
30330 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
30331 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
30333 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
30334 NOT_INUSE (0);
30335 if (scratch_regno == 12)
30336 sp_adjust = 0;
30337 if (end_save + frame_off != 0)
30339 rtx offset = GEN_INT (end_save + frame_off);
30341 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
30343 else
30344 emit_move_insn (ptr_reg, frame_reg_rtx);
30346 ptr_off = -end_save;
30347 insn = rs6000_emit_savres_rtx (info, scratch_reg,
30348 info->altivec_save_offset + ptr_off,
30349 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
30350 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
30351 NULL_RTX, NULL_RTX);
30352 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
30354 /* The oddity mentioned above clobbered our frame reg. */
30355 emit_move_insn (frame_reg_rtx, ptr_reg);
30356 frame_off = ptr_off;
30359 else if (!WORLD_SAVE_P (info)
30360 && info->altivec_size != 0)
30362 int i;
30364 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30365 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
30367 rtx areg, savereg, mem;
30368 HOST_WIDE_INT offset;
30370 offset = (info->altivec_save_offset + frame_off
30371 + 16 * (i - info->first_altivec_reg_save));
30373 savereg = gen_rtx_REG (V4SImode, i);
30375 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
30377 mem = gen_frame_mem (V4SImode,
30378 gen_rtx_PLUS (Pmode, frame_reg_rtx,
30379 GEN_INT (offset)));
30380 insn = emit_insn (gen_rtx_SET (mem, savereg));
30381 areg = NULL_RTX;
30383 else
30385 NOT_INUSE (0);
30386 areg = gen_rtx_REG (Pmode, 0);
30387 emit_move_insn (areg, GEN_INT (offset));
30389 /* AltiVec addressing mode is [reg+reg]. */
30390 mem = gen_frame_mem (V4SImode,
30391 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
30393 /* Rather than emitting a generic move, force use of the stvx
30394 instruction, which we always want on ISA 2.07 (power8) systems.
30395 In particular we don't want xxpermdi/stxvd2x for little
30396 endian. */
30397 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
30400 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
30401 areg, GEN_INT (offset));
30405 /* VRSAVE is a bit vector representing which AltiVec registers
30406 are used. The OS uses this to determine which vector
30407 registers to save on a context switch. We need to save
30408 VRSAVE on the stack frame, add whatever AltiVec registers we
30409 used in this function, and do the corresponding magic in the
30410 epilogue. */
30412 if (!WORLD_SAVE_P (info)
30413 && info->vrsave_size != 0)
30415 rtx reg, vrsave;
30416 int offset;
30417 int save_regno;
30419 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
30420 be using r12 as frame_reg_rtx and r11 as the static chain
30421 pointer for nested functions. */
30422 save_regno = 12;
30423 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30424 && !using_static_chain_p)
30425 save_regno = 11;
30426 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
30428 save_regno = 11;
30429 if (using_static_chain_p)
30430 save_regno = 0;
30433 NOT_INUSE (save_regno);
30434 reg = gen_rtx_REG (SImode, save_regno);
30435 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
30436 if (TARGET_MACHO)
30437 emit_insn (gen_get_vrsave_internal (reg));
30438 else
30439 emit_insn (gen_rtx_SET (reg, vrsave));
30441 /* Save VRSAVE. */
30442 offset = info->vrsave_save_offset + frame_off;
30443 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
30445 /* Include the registers in the mask. */
30446 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
30448 insn = emit_insn (generate_set_vrsave (reg, info, 0));
30451 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
30452 if (!TARGET_SINGLE_PIC_BASE
30453 && ((TARGET_TOC && TARGET_MINIMAL_TOC
30454 && !constant_pool_empty_p ())
30455 || (DEFAULT_ABI == ABI_V4
30456 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
30457 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
30459 /* If emit_load_toc_table will use the link register, we need to save
30460 it. We use R12 for this purpose because emit_load_toc_table
30461 can use register 0. This allows us to use a plain 'blr' to return
30462 from the procedure more often. */
30463 int save_LR_around_toc_setup = (TARGET_ELF
30464 && DEFAULT_ABI == ABI_V4
30465 && flag_pic
30466 && ! info->lr_save_p
30467 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
30468 if (save_LR_around_toc_setup)
30470 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30471 rtx tmp = gen_rtx_REG (Pmode, 12);
30473 sp_adjust = 0;
30474 insn = emit_move_insn (tmp, lr);
30475 RTX_FRAME_RELATED_P (insn) = 1;
30477 rs6000_emit_load_toc_table (TRUE);
30479 insn = emit_move_insn (lr, tmp);
30480 add_reg_note (insn, REG_CFA_RESTORE, lr);
30481 RTX_FRAME_RELATED_P (insn) = 1;
30483 else
30484 rs6000_emit_load_toc_table (TRUE);
30487 #if TARGET_MACHO
30488 if (!TARGET_SINGLE_PIC_BASE
30489 && DEFAULT_ABI == ABI_DARWIN
30490 && flag_pic && crtl->uses_pic_offset_table)
30492 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30493 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
30495 /* Save and restore LR locally around this call (in R0). */
30496 if (!info->lr_save_p)
30497 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
30499 emit_insn (gen_load_macho_picbase (src));
30501 emit_move_insn (gen_rtx_REG (Pmode,
30502 RS6000_PIC_OFFSET_TABLE_REGNUM),
30503 lr);
30505 if (!info->lr_save_p)
30506 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
30508 #endif
30510 /* If we need to, save the TOC register after doing the stack setup.
30511 Do not emit eh frame info for this save. The unwinder wants info,
30512 conceptually attached to instructions in this function, about
30513 register values in the caller of this function. This R2 may have
30514 already been changed from the value in the caller.
30515 We don't attempt to write accurate DWARF EH frame info for R2
30516 because code emitted by gcc for a (non-pointer) function call
30517 doesn't save and restore R2. Instead, R2 is managed out-of-line
30518 by a linker generated plt call stub when the function resides in
30519 a shared library. This behavior is costly to describe in DWARF,
30520 both in terms of the size of DWARF info and the time taken in the
30521 unwinder to interpret it. R2 changes, apart from the
30522 calls_eh_return case earlier in this function, are handled by
30523 linux-unwind.h frob_update_context. */
30524 if (rs6000_save_toc_in_prologue_p ())
30526 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
30527 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
30530 if (using_split_stack && split_stack_arg_pointer_used_p ())
30532 /* Set up the arg pointer (r12) for -fsplit-stack code. If
30533 __morestack was called, it left the arg pointer to the old
30534 stack in r29. Otherwise, the arg pointer is the top of the
30535 current frame. */
30536 cfun->machine->split_stack_argp_used = true;
30537 if (sp_adjust)
30539 rtx r12 = gen_rtx_REG (Pmode, 12);
30540 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
30541 emit_insn_before (set_r12, sp_adjust);
30543 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
30545 rtx r12 = gen_rtx_REG (Pmode, 12);
30546 if (frame_off == 0)
30547 emit_move_insn (r12, frame_reg_rtx);
30548 else
30549 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
30551 if (info->push_p)
30553 rtx r12 = gen_rtx_REG (Pmode, 12);
30554 rtx r29 = gen_rtx_REG (Pmode, 29);
30555 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
30556 rtx not_more = gen_label_rtx ();
30557 rtx jump;
30559 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
30560 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
30561 gen_rtx_LABEL_REF (VOIDmode, not_more),
30562 pc_rtx);
30563 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
30564 JUMP_LABEL (jump) = not_more;
30565 LABEL_NUSES (not_more) += 1;
30566 emit_move_insn (r12, r29);
30567 emit_label (not_more);
30572 /* Output .extern statements for the save/restore routines we use. */
30574 static void
30575 rs6000_output_savres_externs (FILE *file)
30577 rs6000_stack_t *info = rs6000_stack_info ();
30579 if (TARGET_DEBUG_STACK)
30580 debug_stack_info (info);
30582 /* Write .extern for any function we will call to save and restore
30583 fp values. */
30584 if (info->first_fp_reg_save < 64
30585 && !TARGET_MACHO
30586 && !TARGET_ELF)
30588 char *name;
30589 int regno = info->first_fp_reg_save - 32;
30591 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
30593 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
30594 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
30595 name = rs6000_savres_routine_name (info, regno, sel);
30596 fprintf (file, "\t.extern %s\n", name);
30598 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
30600 bool lr = (info->savres_strategy
30601 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
30602 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
30603 name = rs6000_savres_routine_name (info, regno, sel);
30604 fprintf (file, "\t.extern %s\n", name);
30609 /* Write function prologue. */
30611 static void
30612 rs6000_output_function_prologue (FILE *file,
30613 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
30615 if (!cfun->is_thunk)
30616 rs6000_output_savres_externs (file);
30618 /* ELFv2 ABI r2 setup code and local entry point. This must follow
30619 immediately after the global entry point label. */
30620 if (rs6000_global_entry_point_needed_p ())
30622 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
30624 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
30626 if (TARGET_CMODEL != CMODEL_LARGE)
30628 /* In the small and medium code models, we assume the TOC is less
30629 than 2 GB away from the text section, so it can be computed via the
30630 following two-instruction sequence. */
30631 char buf[256];
30633 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
30634 fprintf (file, "0:\taddis 2,12,.TOC.-");
30635 assemble_name (file, buf);
30636 fprintf (file, "@ha\n");
30637 fprintf (file, "\taddi 2,2,.TOC.-");
30638 assemble_name (file, buf);
30639 fprintf (file, "@l\n");
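	  /* That is, with r12 holding the global entry point address,
	     the emitted sequence is:
	       0: addis 2,12,.TOC.-.LCF<n>@ha
		  addi 2,2,.TOC.-.LCF<n>@l  */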
30641 else
30643 /* In the large code model, we allow arbitrary offsets between the
30644 TOC and the text section, so we have to load the offset from
30645 memory. The data field is emitted directly before the global
30646 entry point in rs6000_elf_declare_function_name. */
30647 char buf[256];
30649 #ifdef HAVE_AS_ENTRY_MARKERS
30650 /* If supported by the linker, emit a marker relocation. If the
30651 total code size of the final executable or shared library
30652 happens to fit into 2 GB after all, the linker will replace
30653 this code sequence with the sequence for the small or medium
30654 code model. */
30655 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
30656 #endif
30657 fprintf (file, "\tld 2,");
30658 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
30659 assemble_name (file, buf);
30660 fprintf (file, "-");
30661 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
30662 assemble_name (file, buf);
30663 fprintf (file, "(12)\n");
30664 fprintf (file, "\tadd 2,2,12\n");
30667 fputs ("\t.localentry\t", file);
30668 assemble_name (file, name);
30669 fputs (",.-", file);
30670 assemble_name (file, name);
30671 fputs ("\n", file);
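      /* ".localentry NAME,.-NAME" records the distance between the
	 global and local entry points in the symbol's st_other field.  */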
30674 /* Output -mprofile-kernel code. This needs to be done here instead of
30675 in output_function_profile since it must go after the ELFv2 ABI
30676 local entry point. */
30677 if (TARGET_PROFILE_KERNEL && crtl->profile)
30679 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
30680 gcc_assert (!TARGET_32BIT);
30682 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
30684 /* In the ELFv2 ABI we have no compiler stack word. It must be
30685 the responsibility of _mcount to preserve the static chain
30686 register if required. */
30687 if (DEFAULT_ABI != ABI_ELFv2
30688 && cfun->static_chain_decl != NULL)
30690 asm_fprintf (file, "\tstd %s,24(%s)\n",
30691 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
30692 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
30693 asm_fprintf (file, "\tld %s,24(%s)\n",
30694 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
30696 else
30697 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
30700 rs6000_pic_labelno++;
30703 /* -mprofile-kernel code calls mcount before the function prologue,
30704 so a profiled leaf function should stay a leaf function. */
30705 static bool
30706 rs6000_keep_leaf_when_profiled ()
30708 return TARGET_PROFILE_KERNEL;
30711 /* Non-zero if vmx regs are restored before the frame pop, zero if
30712 we restore after the pop when possible. */
30713 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
30715 /* Restoring cr is a two step process: loading a reg from the frame
30716 save, then moving the reg to cr. For ABI_V4 we must let the
30717 unwinder know that the stack location is no longer valid at or
30718 before the stack deallocation, but we can't emit a cfa_restore for
30719 cr at the stack deallocation like we do for other registers.
30720 The trouble is that it is possible for the move to cr to be
30721 scheduled after the stack deallocation. So say exactly where cr
30722 is located on each of the two insns. */
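/* A sketch of the two-step restore, assuming a hypothetical save
   slot at 8(r1): load_cr_save emits "lwz 12,8(r1)" to fetch the
   saved CR image, and restore_saved_cr emits "mtcrf 0xff,12" (or
   one mtcrf per field) to move it back.  The notes attached to each
   insn pin down where cr lives at that point.  */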
30724 static rtx
30725 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
30727 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
30728 rtx reg = gen_rtx_REG (SImode, regno);
30729 rtx_insn *insn = emit_move_insn (reg, mem);
30731 if (!exit_func && DEFAULT_ABI == ABI_V4)
30733 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
30734 rtx set = gen_rtx_SET (reg, cr);
30736 add_reg_note (insn, REG_CFA_REGISTER, set);
30737 RTX_FRAME_RELATED_P (insn) = 1;
30739 return reg;
30742 /* Reload CR from REG. */
30744 static void
30745 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
30747 int count = 0;
30748 int i;
30750 if (using_mfcr_multiple)
30752 for (i = 0; i < 8; i++)
30753 if (save_reg_p (CR0_REGNO + i))
30754 count++;
30755 gcc_assert (count);
30758 if (using_mfcr_multiple && count > 1)
30760 rtx_insn *insn;
30761 rtvec p;
30762 int ndx;
30764 p = rtvec_alloc (count);
30766 ndx = 0;
30767 for (i = 0; i < 8; i++)
30768 if (save_reg_p (CR0_REGNO + i))
30770 rtvec r = rtvec_alloc (2);
30771 RTVEC_ELT (r, 0) = reg;
30772 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
30773 RTVEC_ELT (p, ndx) =
30774 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
30775 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
30776 ndx++;
30778 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30779 gcc_assert (ndx == count);
30781 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
30782 CR field separately. */
30783 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
30785 for (i = 0; i < 8; i++)
30786 if (save_reg_p (CR0_REGNO + i))
30787 add_reg_note (insn, REG_CFA_RESTORE,
30788 gen_rtx_REG (SImode, CR0_REGNO + i));
30790 RTX_FRAME_RELATED_P (insn) = 1;
30793 else
30794 for (i = 0; i < 8; i++)
30795 if (save_reg_p (CR0_REGNO + i))
30797 rtx insn = emit_insn (gen_movsi_to_cr_one
30798 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
30800 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
30801 CR field separately, attached to the insn that in fact
30802 restores this particular CR field. */
30803 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
30805 add_reg_note (insn, REG_CFA_RESTORE,
30806 gen_rtx_REG (SImode, CR0_REGNO + i));
30808 RTX_FRAME_RELATED_P (insn) = 1;
30812 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
30813 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
30814 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
30816 rtx_insn *insn = get_last_insn ();
30817 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
30819 add_reg_note (insn, REG_CFA_RESTORE, cr);
30820 RTX_FRAME_RELATED_P (insn) = 1;
30824 /* Like cr, the move to lr instruction can be scheduled after the
30825 stack deallocation, but unlike cr, its stack frame save is still
30826 valid. So we only need to emit the cfa_restore on the correct
30827 instruction. */
30829 static void
30830 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
30832 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
30833 rtx reg = gen_rtx_REG (Pmode, regno);
30835 emit_move_insn (reg, mem);
30838 static void
30839 restore_saved_lr (int regno, bool exit_func)
30841 rtx reg = gen_rtx_REG (Pmode, regno);
30842 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30843 rtx_insn *insn = emit_move_insn (lr, reg);
30845 if (!exit_func && flag_shrink_wrap)
30847 add_reg_note (insn, REG_CFA_RESTORE, lr);
30848 RTX_FRAME_RELATED_P (insn) = 1;
30852 static rtx
30853 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
30855 if (DEFAULT_ABI == ABI_ELFv2)
30857 int i;
30858 for (i = 0; i < 8; i++)
30859 if (save_reg_p (CR0_REGNO + i))
30861 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
30862 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
30863 cfa_restores);
30866 else if (info->cr_save_p)
30867 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30868 gen_rtx_REG (SImode, CR2_REGNO),
30869 cfa_restores);
30871 if (info->lr_save_p)
30872 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30873 gen_rtx_REG (Pmode, LR_REGNO),
30874 cfa_restores);
30875 return cfa_restores;
30878 /* Return true if OFFSET from stack pointer can be clobbered by signals.
30879 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
30880 below the stack pointer that are not clobbered by signals. */
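/* For example, on a 64-bit AIX-style ABI an offset of -288 is still
   inside the protected cushion (result false) while -289 is below it
   (result true); under V.4 any negative offset counts as
   clobberable.  */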
30882 static inline bool
30883 offset_below_red_zone_p (HOST_WIDE_INT offset)
30885 return offset < (DEFAULT_ABI == ABI_V4
30886 ? 0
30887 : TARGET_32BIT ? -220 : -288);
30890 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
30892 static void
30893 emit_cfa_restores (rtx cfa_restores)
30895 rtx_insn *insn = get_last_insn ();
30896 rtx *loc = &REG_NOTES (insn);
30898 while (*loc)
30899 loc = &XEXP (*loc, 1);
30900 *loc = cfa_restores;
30901 RTX_FRAME_RELATED_P (insn) = 1;
30904 /* Emit function epilogue as insns. */
30906 void
30907 rs6000_emit_epilogue (int sibcall)
30909 rs6000_stack_t *info;
30910 int restoring_GPRs_inline;
30911 int restoring_FPRs_inline;
30912 int using_load_multiple;
30913 int using_mtcr_multiple;
30914 int use_backchain_to_restore_sp;
30915 int restore_lr;
30916 int strategy;
30917 HOST_WIDE_INT frame_off = 0;
30918 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
30919 rtx frame_reg_rtx = sp_reg_rtx;
30920 rtx cfa_restores = NULL_RTX;
30921 rtx insn;
30922 rtx cr_save_reg = NULL_RTX;
30923 machine_mode reg_mode = Pmode;
30924 int reg_size = TARGET_32BIT ? 4 : 8;
30925 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
30926 ? DFmode : SFmode;
30927 int fp_reg_size = 8;
30928 int i;
30929 bool exit_func;
30930 unsigned ptr_regno;
30932 info = rs6000_stack_info ();
30934 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
30936 reg_mode = V2SImode;
30937 reg_size = 8;
30940 strategy = info->savres_strategy;
30941 using_load_multiple = strategy & REST_MULTIPLE;
30942 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
30943 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
30944 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
30945 || rs6000_cpu == PROCESSOR_PPC603
30946 || rs6000_cpu == PROCESSOR_PPC750
30947 || optimize_size);
30948 /* Restore via the backchain when we have a large frame, since this
30949 is more efficient than an addis, addi pair. The second condition
30950 here will not trigger at the moment; we don't actually need a
30951 frame pointer for alloca, but the generic parts of the compiler
30952 give us one anyway. */
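/* A sketch of the trade-off, assuming a 64-bit target and a
   hypothetical 40000-byte frame: the backchain restore is a single
   	ld 1,0(1)
   while adding the frame size back would need an addis/addi pair
   	addis 1,1,1
   	addi 1,1,-25536
   since 40000 does not fit in a signed 16-bit immediate.  */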
30953 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
30954 ? info->lr_save_offset
30955 : 0) > 32767
30956 || (cfun->calls_alloca
30957 && !frame_pointer_needed));
30958 restore_lr = (info->lr_save_p
30959 && (restoring_FPRs_inline
30960 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
30961 && (restoring_GPRs_inline
30962 || info->first_fp_reg_save < 64)
30963 && !cfun->machine->lr_is_wrapped_separately);
30966 if (WORLD_SAVE_P (info))
30968 int i, j;
30969 char rname[30];
30970 const char *alloc_rname;
30971 rtvec p;
30973 /* eh_rest_world_r10 will return to the location saved in the LR
30974 stack slot (which is not likely to be our caller).
30975 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
30976 rest_world is similar, except any R10 parameter is ignored.
30977 The exception-handling stuff that was here in 2.95 is no
30978 longer necessary. */
30980 p = rtvec_alloc (9
30981 + 32 - info->first_gp_reg_save
30982 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
30983 + 63 + 1 - info->first_fp_reg_save);
30985 strcpy (rname, ((crtl->calls_eh_return) ?
30986 "*eh_rest_world_r10" : "*rest_world"));
30987 alloc_rname = ggc_strdup (rname);
30989 j = 0;
30990 RTVEC_ELT (p, j++) = ret_rtx;
30991 RTVEC_ELT (p, j++)
30992 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
30993 /* The instruction pattern requires a clobber here;
30994 it is shared with the restVEC helper. */
30995 RTVEC_ELT (p, j++)
30996 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
30999 /* CR register traditionally saved as CR2. */
31000 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
31001 RTVEC_ELT (p, j++)
31002 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
31003 if (flag_shrink_wrap)
31005 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
31006 gen_rtx_REG (Pmode, LR_REGNO),
31007 cfa_restores);
31008 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31012 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31014 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
31015 RTVEC_ELT (p, j++)
31016 = gen_frame_load (reg,
31017 frame_reg_rtx, info->gp_save_offset + reg_size * i);
31018 if (flag_shrink_wrap)
31019 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31021 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
31023 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
31024 RTVEC_ELT (p, j++)
31025 = gen_frame_load (reg,
31026 frame_reg_rtx, info->altivec_save_offset + 16 * i);
31027 if (flag_shrink_wrap)
31028 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31030 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
31032 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
31033 ? DFmode : SFmode),
31034 info->first_fp_reg_save + i);
31035 RTVEC_ELT (p, j++)
31036 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
31037 if (flag_shrink_wrap)
31038 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31040 RTVEC_ELT (p, j++)
31041 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
31042 RTVEC_ELT (p, j++)
31043 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
31044 RTVEC_ELT (p, j++)
31045 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
31046 RTVEC_ELT (p, j++)
31047 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
31048 RTVEC_ELT (p, j++)
31049 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
31050 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
31052 if (flag_shrink_wrap)
31054 REG_NOTES (insn) = cfa_restores;
31055 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31056 RTX_FRAME_RELATED_P (insn) = 1;
31058 return;
31061 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
31062 if (info->push_p)
31063 frame_off = info->total_size;
31065 /* Restore AltiVec registers if we must do so before adjusting the
31066 stack. */
31067 if (info->altivec_size != 0
31068 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31069 || (DEFAULT_ABI != ABI_V4
31070 && offset_below_red_zone_p (info->altivec_save_offset))))
31072 int i;
31073 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
31075 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
31076 if (use_backchain_to_restore_sp)
31078 int frame_regno = 11;
31080 if ((strategy & REST_INLINE_VRS) == 0)
31082 /* Of r11 and r12, select the one not clobbered by an
31083 out-of-line restore function for the frame register. */
31084 frame_regno = 11 + 12 - scratch_regno;
31086 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
31087 emit_move_insn (frame_reg_rtx,
31088 gen_rtx_MEM (Pmode, sp_reg_rtx));
31089 frame_off = 0;
31091 else if (frame_pointer_needed)
31092 frame_reg_rtx = hard_frame_pointer_rtx;
31094 if ((strategy & REST_INLINE_VRS) == 0)
31096 int end_save = info->altivec_save_offset + info->altivec_size;
31097 int ptr_off;
31098 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
31099 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
31101 if (end_save + frame_off != 0)
31103 rtx offset = GEN_INT (end_save + frame_off);
31105 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
31107 else
31108 emit_move_insn (ptr_reg, frame_reg_rtx);
31110 ptr_off = -end_save;
31111 insn = rs6000_emit_savres_rtx (info, scratch_reg,
31112 info->altivec_save_offset + ptr_off,
31113 0, V4SImode, SAVRES_VR);
31115 else
31117 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31118 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
31120 rtx addr, areg, mem, insn;
31121 rtx reg = gen_rtx_REG (V4SImode, i);
31122 HOST_WIDE_INT offset
31123 = (info->altivec_save_offset + frame_off
31124 + 16 * (i - info->first_altivec_reg_save));
31126 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
31128 mem = gen_frame_mem (V4SImode,
31129 gen_rtx_PLUS (Pmode, frame_reg_rtx,
31130 GEN_INT (offset)));
31131 insn = gen_rtx_SET (reg, mem);
31133 else
31135 areg = gen_rtx_REG (Pmode, 0);
31136 emit_move_insn (areg, GEN_INT (offset));
31138 /* AltiVec addressing mode is [reg+reg]. */
31139 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
31140 mem = gen_frame_mem (V4SImode, addr);
31142 /* Rather than emitting a generic move, force use of the
31143 lvx instruction, which we always want. In particular we
31144 don't want lxvd2x/xxpermdi for little endian. */
31145 insn = gen_altivec_lvx_v4si_internal (reg, mem);
31148 (void) emit_insn (insn);
31152 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31153 if (((strategy & REST_INLINE_VRS) == 0
31154 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
31155 && (flag_shrink_wrap
31156 || (offset_below_red_zone_p
31157 (info->altivec_save_offset
31158 + 16 * (i - info->first_altivec_reg_save)))))
31160 rtx reg = gen_rtx_REG (V4SImode, i);
31161 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31165 /* Restore VRSAVE if we must do so before adjusting the stack. */
31166 if (info->vrsave_size != 0
31167 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31168 || (DEFAULT_ABI != ABI_V4
31169 && offset_below_red_zone_p (info->vrsave_save_offset))))
31171 rtx reg;
31173 if (frame_reg_rtx == sp_reg_rtx)
31175 if (use_backchain_to_restore_sp)
31177 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31178 emit_move_insn (frame_reg_rtx,
31179 gen_rtx_MEM (Pmode, sp_reg_rtx));
31180 frame_off = 0;
31182 else if (frame_pointer_needed)
31183 frame_reg_rtx = hard_frame_pointer_rtx;
31186 reg = gen_rtx_REG (SImode, 12);
31187 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31188 info->vrsave_save_offset + frame_off));
31190 emit_insn (generate_set_vrsave (reg, info, 1));
31193 insn = NULL_RTX;
31194 /* If we have a large stack frame, restore the old stack pointer
31195 using the backchain. */
31196 if (use_backchain_to_restore_sp)
31198 if (frame_reg_rtx == sp_reg_rtx)
31200 /* Under V.4, don't reset the stack pointer until after we're done
31201 loading the saved registers. */
31202 if (DEFAULT_ABI == ABI_V4)
31203 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31205 insn = emit_move_insn (frame_reg_rtx,
31206 gen_rtx_MEM (Pmode, sp_reg_rtx));
31207 frame_off = 0;
31209 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31210 && DEFAULT_ABI == ABI_V4)
31211 /* frame_reg_rtx has been set up by the altivec restore. */
31213 else
31215 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
31216 frame_reg_rtx = sp_reg_rtx;
31219 /* If we have a frame pointer, we can restore the old stack pointer
31220 from it. */
31221 else if (frame_pointer_needed)
31223 frame_reg_rtx = sp_reg_rtx;
31224 if (DEFAULT_ABI == ABI_V4)
31225 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31226 /* Prevent reordering memory accesses against stack pointer restore. */
31227 else if (cfun->calls_alloca
31228 || offset_below_red_zone_p (-info->total_size))
31229 rs6000_emit_stack_tie (frame_reg_rtx, true);
31231 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
31232 GEN_INT (info->total_size)));
31233 frame_off = 0;
31235 else if (info->push_p
31236 && DEFAULT_ABI != ABI_V4
31237 && !crtl->calls_eh_return)
31239 /* Prevent reordering memory accesses against stack pointer restore. */
31240 if (cfun->calls_alloca
31241 || offset_below_red_zone_p (-info->total_size))
31242 rs6000_emit_stack_tie (frame_reg_rtx, false);
31243 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
31244 GEN_INT (info->total_size)));
31245 frame_off = 0;
31247 if (insn && frame_reg_rtx == sp_reg_rtx)
31249 if (cfa_restores)
31251 REG_NOTES (insn) = cfa_restores;
31252 cfa_restores = NULL_RTX;
31254 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31255 RTX_FRAME_RELATED_P (insn) = 1;
31258 /* Restore AltiVec registers if we have not done so already. */
31259 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31260 && info->altivec_size != 0
31261 && (DEFAULT_ABI == ABI_V4
31262 || !offset_below_red_zone_p (info->altivec_save_offset)))
31264 int i;
31266 if ((strategy & REST_INLINE_VRS) == 0)
31268 int end_save = info->altivec_save_offset + info->altivec_size;
31269 int ptr_off;
31270 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
31271 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
31272 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
31274 if (end_save + frame_off != 0)
31276 rtx offset = GEN_INT (end_save + frame_off);
31278 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
31280 else
31281 emit_move_insn (ptr_reg, frame_reg_rtx);
31283 ptr_off = -end_save;
31284 insn = rs6000_emit_savres_rtx (info, scratch_reg,
31285 info->altivec_save_offset + ptr_off,
31286 0, V4SImode, SAVRES_VR);
31287 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
31289 /* Frame reg was clobbered by out-of-line save. Restore it
31290 from ptr_reg, and if we are calling out-of-line gpr or
31291 fpr restore set up the correct pointer and offset. */
31292 unsigned newptr_regno = 1;
31293 if (!restoring_GPRs_inline)
31295 bool lr = info->gp_save_offset + info->gp_size == 0;
31296 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
31297 newptr_regno = ptr_regno_for_savres (sel);
31298 end_save = info->gp_save_offset + info->gp_size;
31300 else if (!restoring_FPRs_inline)
31302 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
31303 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
31304 newptr_regno = ptr_regno_for_savres (sel);
31305 end_save = info->fp_save_offset + info->fp_size;
31308 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
31309 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
31311 if (end_save + ptr_off != 0)
31313 rtx offset = GEN_INT (end_save + ptr_off);
31315 frame_off = -end_save;
31316 if (TARGET_32BIT)
31317 emit_insn (gen_addsi3_carry (frame_reg_rtx,
31318 ptr_reg, offset));
31319 else
31320 emit_insn (gen_adddi3_carry (frame_reg_rtx,
31321 ptr_reg, offset));
31323 else
31325 frame_off = ptr_off;
31326 emit_move_insn (frame_reg_rtx, ptr_reg);
31330 else
31332 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31333 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
31335 rtx addr, areg, mem, insn;
31336 rtx reg = gen_rtx_REG (V4SImode, i);
31337 HOST_WIDE_INT offset
31338 = (info->altivec_save_offset + frame_off
31339 + 16 * (i - info->first_altivec_reg_save));
31341 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
31343 mem = gen_frame_mem (V4SImode,
31344 gen_rtx_PLUS (Pmode, frame_reg_rtx,
31345 GEN_INT (offset)));
31346 insn = gen_rtx_SET (reg, mem);
31348 else
31350 areg = gen_rtx_REG (Pmode, 0);
31351 emit_move_insn (areg, GEN_INT (offset));
31353 /* AltiVec addressing mode is [reg+reg]. */
31354 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
31355 mem = gen_frame_mem (V4SImode, addr);
31357 /* Rather than emitting a generic move, force use of the
31358 lvx instruction, which we always want. In particular we
31359 don't want lxvd2x/xxpermdi for little endian. */
31360 insn = gen_altivec_lvx_v4si_internal (reg, mem);
31363 (void) emit_insn (insn);
31367 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31368 if (((strategy & REST_INLINE_VRS) == 0
31369 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
31370 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
31372 rtx reg = gen_rtx_REG (V4SImode, i);
31373 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31377 /* Restore VRSAVE if we have not done so already. */
31378 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31379 && info->vrsave_size != 0
31380 && (DEFAULT_ABI == ABI_V4
31381 || !offset_below_red_zone_p (info->vrsave_save_offset)))
31383 rtx reg;
31385 reg = gen_rtx_REG (SImode, 12);
31386 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31387 info->vrsave_save_offset + frame_off));
31389 emit_insn (generate_set_vrsave (reg, info, 1));
31392 /* If we exit by an out-of-line restore function on ABI_V4 then that
31393 function will deallocate the stack, so we don't need to worry
31394 about the unwinder restoring cr from an invalid stack frame
31395 location. */
31396 exit_func = (!restoring_FPRs_inline
31397 || (!restoring_GPRs_inline
31398 && info->first_fp_reg_save == 64));
31400 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
31401 *separate* slots if the routine calls __builtin_eh_return, so
31402 that they can be independently restored by the unwinder. */
31403 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
31405 int i, cr_off = info->ehcr_offset;
31407 for (i = 0; i < 8; i++)
31408 if (!call_used_regs[CR0_REGNO + i])
31410 rtx reg = gen_rtx_REG (SImode, 0);
31411 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31412 cr_off + frame_off));
31414 insn = emit_insn (gen_movsi_to_cr_one
31415 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
31417 if (!exit_func && flag_shrink_wrap)
31419 add_reg_note (insn, REG_CFA_RESTORE,
31420 gen_rtx_REG (SImode, CR0_REGNO + i));
31422 RTX_FRAME_RELATED_P (insn) = 1;
31425 cr_off += reg_size;
31429 /* Get the old lr if we saved it. If we are restoring registers
31430 out-of-line, then the out-of-line routines can do this for us. */
31431 if (restore_lr && restoring_GPRs_inline)
31432 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
31434 /* Get the old cr if we saved it. */
31435 if (info->cr_save_p)
31437 unsigned cr_save_regno = 12;
31439 if (!restoring_GPRs_inline)
31441 /* Ensure we don't use the register used by the out-of-line
31442 gpr register restore below. */
31443 bool lr = info->gp_save_offset + info->gp_size == 0;
31444 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
31445 int gpr_ptr_regno = ptr_regno_for_savres (sel);
31447 if (gpr_ptr_regno == 12)
31448 cr_save_regno = 11;
31449 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
31451 else if (REGNO (frame_reg_rtx) == 12)
31452 cr_save_regno = 11;
31454 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
31455 info->cr_save_offset + frame_off,
31456 exit_func);
31459 /* Set LR here to try to overlap restores below. */
31460 if (restore_lr && restoring_GPRs_inline)
31461 restore_saved_lr (0, exit_func);
31463 /* Load exception handler data registers, if needed. */
31464 if (crtl->calls_eh_return)
31466 unsigned int i, regno;
31468 if (TARGET_AIX)
31470 rtx reg = gen_rtx_REG (reg_mode, 2);
31471 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31472 frame_off + RS6000_TOC_SAVE_SLOT));
31475 for (i = 0; ; ++i)
31477 rtx mem;
31479 regno = EH_RETURN_DATA_REGNO (i);
31480 if (regno == INVALID_REGNUM)
31481 break;
31483 /* Note: possible use of r0 here to address SPE regs. */
31484 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
31485 info->ehrd_offset + frame_off
31486 + reg_size * (int) i);
31488 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
31492 /* Restore GPRs. This is done as a PARALLEL if we are using
31493 the load-multiple instructions. */
31494 if (TARGET_SPE_ABI
31495 && info->spe_64bit_regs_used
31496 && info->first_gp_reg_save != 32)
31498 /* Determine whether we can address all of the registers that need
31499 to be saved with an offset from frame_reg_rtx that fits in
31500 the small const field for SPE memory instructions. */
31501 int spe_regs_addressable
31502 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
31503 + reg_size * (32 - info->first_gp_reg_save - 1))
31504 && restoring_GPRs_inline);
31506 if (!spe_regs_addressable)
31508 int ool_adjust = 0;
31509 rtx old_frame_reg_rtx = frame_reg_rtx;
31510 /* Make r11 point to the start of the SPE save area. We worried about
31511 not clobbering it when we were saving registers in the prologue.
31512 There's no need to worry here because the static chain is passed
31513 anew to every function. */
31515 if (!restoring_GPRs_inline)
31516 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
31517 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31518 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
31519 GEN_INT (info->spe_gp_save_offset
31520 + frame_off
31521 - ool_adjust)));
31522 /* Keep the invariant that frame_reg_rtx + frame_off points
31523 at the top of the stack frame. */
31524 frame_off = -info->spe_gp_save_offset + ool_adjust;
31527 if (restoring_GPRs_inline)
31529 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
31531 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31532 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
31534 rtx offset, addr, mem, reg;
31536 /* We're doing all this to ensure that the immediate offset
31537 fits into the immediate field of 'evldd'. */
31538 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
31540 offset = GEN_INT (spe_offset + reg_size * i);
31541 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
31542 mem = gen_rtx_MEM (V2SImode, addr);
31543 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
31545 emit_move_insn (reg, mem);
31548 else
31549 rs6000_emit_savres_rtx (info, frame_reg_rtx,
31550 info->spe_gp_save_offset + frame_off,
31551 info->lr_save_offset + frame_off,
31552 reg_mode,
31553 SAVRES_GPR | SAVRES_LR);
31555 else if (!restoring_GPRs_inline)
31557 /* We are jumping to an out-of-line function. */
31558 rtx ptr_reg;
31559 int end_save = info->gp_save_offset + info->gp_size;
31560 bool can_use_exit = end_save == 0;
31561 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
31562 int ptr_off;
31564 /* Emit stack reset code if we need it. */
31565 ptr_regno = ptr_regno_for_savres (sel);
31566 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
31567 if (can_use_exit)
31568 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
31569 else if (end_save + frame_off != 0)
31570 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
31571 GEN_INT (end_save + frame_off)));
31572 else if (REGNO (frame_reg_rtx) != ptr_regno)
31573 emit_move_insn (ptr_reg, frame_reg_rtx);
31574 if (REGNO (frame_reg_rtx) == ptr_regno)
31575 frame_off = -end_save;
31577 if (can_use_exit && info->cr_save_p)
31578 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
31580 ptr_off = -end_save;
31581 rs6000_emit_savres_rtx (info, ptr_reg,
31582 info->gp_save_offset + ptr_off,
31583 info->lr_save_offset + ptr_off,
31584 reg_mode, sel);
31586 else if (using_load_multiple)
31588 rtvec p;
31589 p = rtvec_alloc (32 - info->first_gp_reg_save);
31590 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31591 RTVEC_ELT (p, i)
31592 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
31593 frame_reg_rtx,
31594 info->gp_save_offset + frame_off + reg_size * i);
31595 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
31597 else
31599 int offset = info->gp_save_offset + frame_off;
31600 for (i = info->first_gp_reg_save; i < 32; i++)
31602 if (rs6000_reg_live_or_pic_offset_p (i)
31603 && !cfun->machine->gpr_is_wrapped_separately[i])
31605 rtx reg = gen_rtx_REG (reg_mode, i);
31606 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
31609 offset += reg_size;
31613 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
31615 /* If the frame pointer was used then we can't delay emitting
31616 a REG_CFA_DEF_CFA note. This must happen on the insn that
31617 restores the frame pointer, r31. We may have already emitted
31618 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
31619 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
31620 be harmless if emitted. */
31621 if (frame_pointer_needed)
31623 insn = get_last_insn ();
31624 add_reg_note (insn, REG_CFA_DEF_CFA,
31625 plus_constant (Pmode, frame_reg_rtx, frame_off));
31626 RTX_FRAME_RELATED_P (insn) = 1;
31629 /* Set up cfa_restores. We always need these when
31630 shrink-wrapping. If not shrink-wrapping then we only need
31631 the cfa_restore when the stack location is no longer valid.
31632 The cfa_restores must be emitted on or before the insn that
31633 invalidates the stack, and of course must not be emitted
31634 before the insn that actually does the restore. The latter
31635 is why it is a bad idea to emit the cfa_restores as a group
31636 on the last instruction here that actually does a restore:
31637 that insn may be reordered with respect to others doing
31638 restores. */
31639 if (flag_shrink_wrap
31640 && !restoring_GPRs_inline
31641 && info->first_fp_reg_save == 64)
31642 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
31644 for (i = info->first_gp_reg_save; i < 32; i++)
31645 if (!restoring_GPRs_inline
31646 || using_load_multiple
31647 || rs6000_reg_live_or_pic_offset_p (i))
31649 if (cfun->machine->gpr_is_wrapped_separately[i])
31650 continue;
31652 rtx reg = gen_rtx_REG (reg_mode, i);
31653 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31657 if (!restoring_GPRs_inline
31658 && info->first_fp_reg_save == 64)
31660 /* We are jumping to an out-of-line function. */
31661 if (cfa_restores)
31662 emit_cfa_restores (cfa_restores);
31663 return;
31666 if (restore_lr && !restoring_GPRs_inline)
31668 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
31669 restore_saved_lr (0, exit_func);
31672 /* Restore FPRs if we need to do it without calling a function. */
31673 if (restoring_FPRs_inline)
31675 int offset = info->fp_save_offset + frame_off;
31676 for (i = info->first_fp_reg_save; i < 64; i++)
31678 if (save_reg_p (i)
31679 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
31681 rtx reg = gen_rtx_REG (fp_reg_mode, i);
31682 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
31683 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
31684 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
31685 cfa_restores);
31688 offset += fp_reg_size;
31692 /* If we saved cr, restore it here, but only the fields that were used. */
31693 if (info->cr_save_p)
31694 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
31696 /* If this is V.4, unwind the stack pointer after all of the loads
31697 have been done, or set up r11 if we are restoring fp out of line. */
31698 ptr_regno = 1;
31699 if (!restoring_FPRs_inline)
31701 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
31702 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
31703 ptr_regno = ptr_regno_for_savres (sel);
31706 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
31707 if (REGNO (frame_reg_rtx) == ptr_regno)
31708 frame_off = 0;
31710 if (insn && restoring_FPRs_inline)
31712 if (cfa_restores)
31714 REG_NOTES (insn) = cfa_restores;
31715 cfa_restores = NULL_RTX;
31717 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31718 RTX_FRAME_RELATED_P (insn) = 1;
31721 if (crtl->calls_eh_return)
31723 rtx sa = EH_RETURN_STACKADJ_RTX;
31724 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
31727 if (!sibcall && restoring_FPRs_inline)
31729 if (cfa_restores)
31731 /* We can't hang the cfa_restores off a simple return,
31732 since the shrink-wrap code sometimes uses an existing
31733 return. This means there might be a path from
31734 pre-prologue code to this return, and dwarf2cfi code
31735 wants the eh_frame unwinder state to be the same on
31736 all paths to any point. So we need to emit the
31737 cfa_restores before the return. For -m64 we really
31738 don't need epilogue cfa_restores at all, except for
31739 this irritating dwarf2cfi requirement when
31740 shrink-wrapping; the stack red zone means eh_frame info
31741 from the prologue telling the unwinder to restore
31742 from the stack is perfectly good right to the end of
31743 the function. */
31744 emit_insn (gen_blockage ());
31745 emit_cfa_restores (cfa_restores);
31746 cfa_restores = NULL_RTX;
31749 emit_jump_insn (targetm.gen_simple_return ());
31752 if (!sibcall && !restoring_FPRs_inline)
31754 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
31755 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
31756 int elt = 0;
31757 RTVEC_ELT (p, elt++) = ret_rtx;
31758 if (lr)
31759 RTVEC_ELT (p, elt++)
31760 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
31762 /* We have to restore more than two FP registers, so branch to the
31763 restore function. It will return to our caller. */
31764 int i;
31765 int reg;
31766 rtx sym;
31768 if (flag_shrink_wrap)
31769 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
31771 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
31772 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
31773 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
31774 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
31776 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
31778 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
31780 RTVEC_ELT (p, elt++)
31781 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
31782 if (flag_shrink_wrap)
31783 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31786 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
31789 if (cfa_restores)
31791 if (sibcall)
31792 /* Ensure the cfa_restores are hung off an insn that won't
31793 be reordered above other restores. */
31794 emit_insn (gen_blockage ());
31796 emit_cfa_restores (cfa_restores);
31800 /* Write function epilogue. */
31802 static void
31803 rs6000_output_function_epilogue (FILE *file,
31804 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
31806 #if TARGET_MACHO
31807 macho_branch_islands ();
31810 rtx_insn *insn = get_last_insn ();
31811 rtx_insn *deleted_debug_label = NULL;
31813 /* Mach-O doesn't support labels at the end of objects, so if
31814 it looks like we might want one, take special action.
31816 First, collect any sequence of deleted debug labels. */
31817 while (insn
31818 && NOTE_P (insn)
31819 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
31821 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
31822 a nop; instead set their CODE_LABEL_NUMBER to -1, since
31823 otherwise there would be code generation differences
31824 between -g and -g0. */
31825 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
31826 deleted_debug_label = insn;
31827 insn = PREV_INSN (insn);
31830 /* Second, if we have:
31831 label:
31832 barrier
31833 then this needs to be detected, so skip past the barrier. */
31835 if (insn && BARRIER_P (insn))
31836 insn = PREV_INSN (insn);
31838 /* Up to now we've only seen notes or barriers. */
31839 if (insn)
31841 if (LABEL_P (insn)
31842 || (NOTE_P (insn)
31843 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
31844 /* Trailing label: <barrier>. */
31845 fputs ("\tnop\n", file);
31846 else
31848 /* Lastly, see if we have a completely empty function body. */
31849 while (insn && ! INSN_P (insn))
31850 insn = PREV_INSN (insn);
31851 /* If we don't find any insns, we've got an empty function body;
31852 i.e. completely empty, without a return or branch. This is
31853 taken as the case where a function body has been removed
31854 because it contains an inline __builtin_unreachable(). GCC
31855 states that reaching __builtin_unreachable() means UB so we're
31856 not obliged to do anything special; however, we want
31857 non-zero-sized function bodies. To meet this, and help the
31858 user out, let's trap the case. */
31859 if (insn == NULL)
31860 fputs ("\ttrap\n", file);
31863 else if (deleted_debug_label)
31864 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
31865 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
31866 CODE_LABEL_NUMBER (insn) = -1;
31868 #endif
31870 /* Output a traceback table here. See /usr/include/sys/debug.h for info
31871 on its format.
31873 We don't output a traceback table if -finhibit-size-directive was
31874 used. The documentation for -finhibit-size-directive reads
31875 ``don't output a @code{.size} assembler directive, or anything
31876 else that would cause trouble if the function is split in the
31877 middle, and the two halves are placed at locations far apart in
31878 memory.'' The traceback table has this property, since it
31879 includes the offset from the start of the function to the
31880 traceback table itself.
31882 The System V.4 PowerPC ABI (and the embedded ABI derived from it) uses a
31883 different traceback table. */
31884 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
31885 && ! flag_inhibit_size_directive
31886 && rs6000_traceback != traceback_none && !cfun->is_thunk)
31888 const char *fname = NULL;
31889 const char *language_string = lang_hooks.name;
31890 int fixed_parms = 0, float_parms = 0, parm_info = 0;
31891 int i;
31892 int optional_tbtab;
31893 rs6000_stack_t *info = rs6000_stack_info ();
31895 if (rs6000_traceback == traceback_full)
31896 optional_tbtab = 1;
31897 else if (rs6000_traceback == traceback_part)
31898 optional_tbtab = 0;
31899 else
31900 optional_tbtab = !optimize_size && !TARGET_ELF;
31902 if (optional_tbtab)
31904 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
31905 while (*fname == '.') /* V.4 encodes . in the name */
31906 fname++;
31908 /* Need label immediately before tbtab, so we can compute
31909 its offset from the function start. */
31910 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
31911 ASM_OUTPUT_LABEL (file, fname);
31914 /* The .tbtab pseudo-op can only be used for the first eight
31915 expressions, since it can't handle the possibly variable
31916 length fields that follow. However, if you omit the optional
31917 fields, the assembler outputs zeros for all optional fields
31918 anyway, giving each variable length field its minimum length
31919 (as defined in sys/debug.h). Thus we cannot use the .tbtab
31920 pseudo-op at all. */
31922 /* An all-zero word flags the start of the tbtab, for debuggers
31923 that have to find it by searching forward from the entry
31924 point or from the current pc. */
31925 fputs ("\t.long 0\n", file);
31927 /* Tbtab format type. Use format type 0. */
31928 fputs ("\t.byte 0,", file);
31930 /* Language type. Unfortunately, there does not seem to be any
31931 official way to discover the language being compiled, so we
31932 use language_string.
31933 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
31934 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
31935 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
31936 either, so for now use 0. */
31937 if (lang_GNU_C ()
31938 || ! strcmp (language_string, "GNU GIMPLE")
31939 || ! strcmp (language_string, "GNU Go")
31940 || ! strcmp (language_string, "libgccjit"))
31941 i = 0;
31942 else if (! strcmp (language_string, "GNU F77")
31943 || lang_GNU_Fortran ())
31944 i = 1;
31945 else if (! strcmp (language_string, "GNU Pascal"))
31946 i = 2;
31947 else if (! strcmp (language_string, "GNU Ada"))
31948 i = 3;
31949 else if (lang_GNU_CXX ()
31950 || ! strcmp (language_string, "GNU Objective-C++"))
31951 i = 9;
31952 else if (! strcmp (language_string, "GNU Java"))
31953 i = 13;
31954 else if (! strcmp (language_string, "GNU Objective-C"))
31955 i = 14;
31956 else
31957 gcc_unreachable ();
31958 fprintf (file, "%d,", i);
31960 /* 8 single bit fields: global linkage (not set for C extern linkage,
31961 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
31962 from start of procedure stored in tbtab, internal function, function
31963 has controlled storage, function has no toc, function uses fp,
31964 function logs/aborts fp operations. */
31965 /* Assume that fp operations are used if any fp reg must be saved. */
31966 fprintf (file, "%d,",
31967 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
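/* Worked example: with optional_tbtab == 1 and at least one FP
   register saved, the byte emitted above is (1 << 5) | (1 << 1),
   i.e. 34.  */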
31969 /* 6 bitfields: function is interrupt handler, name present in
31970 proc table, function calls alloca, on condition directives
31971 (controls stack walks, 3 bits), saves condition reg, saves
31972 link reg. */
31973 /* The `function calls alloca' bit seems to be set whenever reg 31 is
31974 set up as a frame pointer, even when there is no alloca call. */
31975 fprintf (file, "%d,",
31976 ((optional_tbtab << 6)
31977 | ((optional_tbtab & frame_pointer_needed) << 5)
31978 | (info->cr_save_p << 1)
31979 | (info->lr_save_p)));
31981 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
31982 (6 bits). */
31983 fprintf (file, "%d,",
31984 (info->push_p << 7) | (64 - info->first_fp_reg_save));
31986 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
31987 fprintf (file, "%d,", (32 - first_reg_to_save ()));
31989 if (optional_tbtab)
31991 /* Compute the parameter info from the function decl argument
31992 list. */
31993 tree decl;
31994 int next_parm_info_bit = 31;
31996 for (decl = DECL_ARGUMENTS (current_function_decl);
31997 decl; decl = DECL_CHAIN (decl))
31999 rtx parameter = DECL_INCOMING_RTL (decl);
32000 machine_mode mode = GET_MODE (parameter);
32002 if (GET_CODE (parameter) == REG)
32004 if (SCALAR_FLOAT_MODE_P (mode))
32006 int bits;
32008 float_parms++;
32010 switch (mode)
32012 case SFmode:
32013 case SDmode:
32014 bits = 0x2;
32015 break;
32017 case DFmode:
32018 case DDmode:
32019 case TFmode:
32020 case TDmode:
32021 case IFmode:
32022 case KFmode:
32023 bits = 0x3;
32024 break;
32026 default:
32027 gcc_unreachable ();
32030 /* If only one bit will fit, don't or in this entry. */
32031 if (next_parm_info_bit > 0)
32032 parm_info |= (bits << (next_parm_info_bit - 1));
32033 next_parm_info_bit -= 2;
32035 else
32037 fixed_parms += ((GET_MODE_SIZE (mode)
32038 + (UNITS_PER_WORD - 1))
32039 / UNITS_PER_WORD);
32040 next_parm_info_bit -= 1;
32046 /* Number of fixed point parameters. */
32047 /* This is actually the number of words of fixed point parameters; thus
32048 an 8-byte struct counts as 2, and the maximum value is 8. */
32049 fprintf (file, "%d,", fixed_parms);
32051 /* 2 bitfields: number of floating point parameters (7 bits), parameters
32052 all on stack. */
32053 /* This is actually the number of fp registers that hold parameters;
32054 and thus the maximum value is 13. */
32055 /* Set parameters on stack bit if parameters are not in their original
32056 registers, regardless of whether they are on the stack? Xlc
32057 seems to set the bit when not optimizing. */
32058 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
32060 if (optional_tbtab)
32062 /* Optional fields follow. Some are variable length. */
32064 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
32065 float, 11 double float. */
32066 /* There is an entry for each parameter in a register, in the order
32067 that they occur in the parameter list. Any intervening arguments
32068 on the stack are ignored. If the list overflows a long (max
32069 possible length 34 bits) then completely leave off all elements
32070 that don't fit. */
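/* Worked example (hypothetical signature): for f (int a, double b),
   the int leaves bit 31 as 0 and the double sets the next two bits
   to 11, so parm_info is 0x3 << 29 == 0x60000000.  */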
32071 /* Only emit this long if there was at least one parameter. */
32072 if (fixed_parms || float_parms)
32073 fprintf (file, "\t.long %d\n", parm_info);
32075 /* Offset from start of code to tb table. */
32076 fputs ("\t.long ", file);
32077 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
32078 RS6000_OUTPUT_BASENAME (file, fname);
32079 putc ('-', file);
32080 rs6000_output_function_entry (file, fname);
32081 putc ('\n', file);
32083 /* Interrupt handler mask. */
32084 /* Omit this long, since we never set the interrupt handler bit
32085 above. */
32087 /* Number of CTL (controlled storage) anchors. */
32088 /* Omit this long, since the has_ctl bit is never set above. */
32090 /* Displacement into stack of each CTL anchor. */
32091 /* Omit this list of longs, because there are no CTL anchors. */
32093 /* Length of function name. */
32094 if (*fname == '*')
32095 ++fname;
32096 fprintf (file, "\t.short %d\n", (int) strlen (fname));
32098 /* Function name. */
32099 assemble_string (fname, strlen (fname));
32101 /* Register for alloca automatic storage; this is always reg 31.
32102 Only emit this if the alloca bit was set above. */
32103 if (frame_pointer_needed)
32104 fputs ("\t.byte 31\n", file);
32106 fputs ("\t.align 2\n", file);
32110 /* Arrange to define .LCTOC1 label, if not already done. */
32111 if (need_toc_init)
32113 need_toc_init = 0;
32114 if (!toc_initialized)
32116 switch_to_section (toc_section);
32117 switch_to_section (current_function_section ());
32122 /* -fsplit-stack support. */
32124 /* A SYMBOL_REF for __morestack. */
32125 static GTY(()) rtx morestack_ref;
32127 static rtx
32128 gen_add3_const (rtx rt, rtx ra, long c)
32130 if (TARGET_64BIT)
32131 return gen_adddi3 (rt, ra, GEN_INT (c));
32132 else
32133 return gen_addsi3 (rt, ra, GEN_INT (c));
32136 /* Emit -fsplit-stack prologue, which goes before the regular function
32137 prologue (at local entry point in the case of ELFv2). */
32139 void
32140 rs6000_expand_split_stack_prologue (void)
32142 rs6000_stack_t *info = rs6000_stack_info ();
32143 unsigned HOST_WIDE_INT allocate;
32144 long alloc_hi, alloc_lo;
32145 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
32146 rtx_insn *insn;
32148 gcc_assert (flag_split_stack && reload_completed);
32150 if (!info->push_p)
32151 return;
32153 if (global_regs[29])
32155 error ("-fsplit-stack uses register r29");
32156 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
32157 "conflicts with %qD", global_regs_decl[29]);
32160 allocate = info->total_size;
32161 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
32163 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
32164 return;
32166 if (morestack_ref == NULL_RTX)
32168 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
32169 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
32170 | SYMBOL_FLAG_FUNCTION);
32173 r0 = gen_rtx_REG (Pmode, 0);
32174 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32175 r12 = gen_rtx_REG (Pmode, 12);
32176 emit_insn (gen_load_split_stack_limit (r0));
32177 /* Always emit two insns here to calculate the requested stack,
32178 so that the linker can edit them when adjusting size for calling
32179 non-split-stack code. */
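/* Worked example: for allocate == 0x12345, alloc_hi == -0x10000 and
   alloc_lo == -0x2345, so the requested stack is computed as
   	addis 12,1,-1
   	addi 12,12,-9029
   alloc_hi is rounded so that alloc_lo always fits in a signed
   16-bit immediate.  */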
32180 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
32181 alloc_lo = -allocate - alloc_hi;
32182 if (alloc_hi != 0)
32184 emit_insn (gen_add3_const (r12, r1, alloc_hi));
32185 if (alloc_lo != 0)
32186 emit_insn (gen_add3_const (r12, r12, alloc_lo));
32187 else
32188 emit_insn (gen_nop ());
32190 else
32192 emit_insn (gen_add3_const (r12, r1, alloc_lo));
32193 emit_insn (gen_nop ());
32196 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
32197 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
32198 ok_label = gen_label_rtx ();
32199 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
32200 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
32201 gen_rtx_LABEL_REF (VOIDmode, ok_label),
32202 pc_rtx);
32203 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
32204 JUMP_LABEL (insn) = ok_label;
32205 /* Mark the jump as very likely to be taken. */
32206 add_int_reg_note (insn, REG_BR_PROB,
32207 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
32209 lr = gen_rtx_REG (Pmode, LR_REGNO);
32210 insn = emit_move_insn (r0, lr);
32211 RTX_FRAME_RELATED_P (insn) = 1;
32212 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
32213 RTX_FRAME_RELATED_P (insn) = 1;
32215 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
32216 const0_rtx, const0_rtx));
32217 call_fusage = NULL_RTX;
32218 use_reg (&call_fusage, r12);
32219 /* Say the call uses r0, even though it doesn't, to stop regrename
32220 from twiddling with the insns saving lr, trashing args for cfun.
32221 The insns restoring lr are similarly protected by making
32222 split_stack_return use r0. */
32223 use_reg (&call_fusage, r0);
32224 add_function_usage_to (insn, call_fusage);
32225 /* Indicate that this function can't jump to non-local gotos. */
32226 make_reg_eh_region_note_nothrow_nononlocal (insn);
32227 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
32228 insn = emit_move_insn (lr, r0);
32229 add_reg_note (insn, REG_CFA_RESTORE, lr);
32230 RTX_FRAME_RELATED_P (insn) = 1;
32231 emit_insn (gen_split_stack_return ());
32233 emit_label (ok_label);
32234 LABEL_NUSES (ok_label) = 1;
32237 /* Return the internal arg pointer used for function incoming
32238 arguments. With -fsplit-stack, the arg pointer is r12, so we need
32239 to copy it to a pseudo in order for it to be preserved over calls
32240 and suchlike. We'd really like to use a pseudo here for the
32241 internal arg pointer but data-flow analysis is not prepared to
32242 accept pseudos as live at the beginning of a function. */
32244 static rtx
32245 rs6000_internal_arg_pointer (void)
32247 if (flag_split_stack
32248 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
32249 == NULL))
32252 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
32254 rtx pat;
32256 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
32257 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
32259 /* Put the pseudo initialization right after the note at the
32260 beginning of the function. */
32261 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
32262 gen_rtx_REG (Pmode, 12));
32263 push_topmost_sequence ();
32264 emit_insn_after (pat, get_insns ());
32265 pop_topmost_sequence ();
32267 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
32268 FIRST_PARM_OFFSET (current_function_decl));
32270 return virtual_incoming_args_rtx;
32273 /* We may have to tell the dataflow pass that the split stack prologue
32274 is initializing a register. */
32276 static void
32277 rs6000_live_on_entry (bitmap regs)
32279 if (flag_split_stack)
32280 bitmap_set_bit (regs, 12);
32283 /* Emit -fsplit-stack dynamic stack allocation space check. */
32285 void
32286 rs6000_split_stack_space_check (rtx size, rtx label)
32288 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32289 rtx limit = gen_reg_rtx (Pmode);
32290 rtx requested = gen_reg_rtx (Pmode);
32291 rtx cmp = gen_reg_rtx (CCUNSmode);
32292 rtx jump;
32294 emit_insn (gen_load_split_stack_limit (limit));
32295 if (CONST_INT_P (size))
32296 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
32297 else
32299 size = force_reg (Pmode, size);
32300 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
32302 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
32303 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
32304 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
32305 gen_rtx_LABEL_REF (VOIDmode, label),
32306 pc_rtx);
32307 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
32308 JUMP_LABEL (jump) = label;
32311 /* A C compound statement that outputs the assembler code for a thunk
32312 function, used to implement C++ virtual function calls with
32313 multiple inheritance. The thunk acts as a wrapper around a virtual
32314 function, adjusting the implicit object parameter before handing
32315 control off to the real function.
32317 First, emit code to add the integer DELTA to the location that
32318 contains the incoming first argument. Assume that this argument
32319 contains a pointer, and is the one used to pass the `this' pointer
32320 in C++. This is the incoming argument *before* the function
32321 prologue, e.g. `%o0' on a sparc. The addition must preserve the
32322 values of all other incoming arguments.
32324 After the addition, emit code to jump to FUNCTION, which is a
32325 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
32326 not touch the return address. Hence returning from FUNCTION will
32327 return to whoever called the current `thunk'.
32329 The effect must be as if FUNCTION had been called directly with the
32330 adjusted first argument. This macro is responsible for emitting
32331 all of the code for a thunk function; output_function_prologue()
32332 and output_function_epilogue() are not invoked.
32334 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
32335 been extracted from it.) It might possibly be useful on some
32336 targets, but probably not.
32338 If you do not define this macro, the target-independent code in the
32339 C++ frontend will generate a less efficient heavyweight thunk that
32340 calls FUNCTION instead of jumping to it. The generic approach does
32341 not support varargs. */
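/* A sketch of the output, assuming delta == 8 and vcall_offset == 0:
   	addi 3,3,8
   	b <function>
   i.e. bump the this pointer in r3 and tail-jump to the real
   function.  */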
32343 static void
32344 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
32345 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
32346 tree function)
32348 rtx this_rtx, funexp;
32349 rtx_insn *insn;
32351 reload_completed = 1;
32352 epilogue_completed = 1;
32354 /* Mark the end of the (empty) prologue. */
32355 emit_note (NOTE_INSN_PROLOGUE_END);
32357 /* Find the "this" pointer. If the function returns a structure,
32358 the structure return pointer is in r3. */
32359 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
32360 this_rtx = gen_rtx_REG (Pmode, 4);
32361 else
32362 this_rtx = gen_rtx_REG (Pmode, 3);
32364 /* Apply the constant offset, if required. */
32365 if (delta)
32366 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
32368 /* Apply the offset from the vtable, if required. */
32369 if (vcall_offset)
32371 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
32372 rtx tmp = gen_rtx_REG (Pmode, 12);
32374 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
32375 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
32377 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
32378 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
32380 else
32382 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
32384 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
32386 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
32389 /* Generate a tail call to the target function. */
32390 if (!TREE_USED (function))
32392 assemble_external (function);
32393 TREE_USED (function) = 1;
32395 funexp = XEXP (DECL_RTL (function), 0);
32396 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
32398 #if TARGET_MACHO
32399 if (MACHOPIC_INDIRECT)
32400 funexp = machopic_indirect_call_target (funexp);
32401 #endif
32403 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
32404 generate sibcall RTL explicitly. */
32405 insn = emit_call_insn (
32406 gen_rtx_PARALLEL (VOIDmode,
32407 gen_rtvec (3,
32408 gen_rtx_CALL (VOIDmode,
32409 funexp, const0_rtx),
32410 gen_rtx_USE (VOIDmode, const0_rtx),
32411 simple_return_rtx)));
32412 SIBLING_CALL_P (insn) = 1;
32413 emit_barrier ();
32415 /* Run just enough of rest_of_compilation to get the insns emitted.
32416 There's not really enough bulk here to make other passes such as
32417 instruction scheduling worth while. Note that use_thunk calls
32418 assemble_start_function and assemble_end_function. */
32419 insn = get_insns ();
32420 shorten_branches (insn);
32421 final_start_function (insn, file, 1);
32422 final (insn, file, 1);
32423 final_end_function ();
32425 reload_completed = 0;
32426 epilogue_completed = 0;
32429 /* A quick summary of the various types of 'constant-pool tables'
32430 under PowerPC:
32432 	Target		Flags		Name		One table per
32433 	AIX		(none)		AIX TOC		object file
32434 	AIX		-mfull-toc	AIX TOC		object file
32435 	AIX		-mminimal-toc	AIX minimal TOC	translation unit
32436 	SVR4/EABI	(none)		SVR4 SDATA	object file
32437 	SVR4/EABI	-fpic		SVR4 pic	object file
32438 	SVR4/EABI	-fPIC		SVR4 PIC	translation unit
32439 	SVR4/EABI	-mrelocatable	EABI TOC	function
32440 	SVR4/EABI	-maix		AIX TOC		object file
32441 	SVR4/EABI	-maix -mminimal-toc
32442 			AIX minimal TOC	translation unit
32444 	Name			Reg.	Set by	entries	contains:
32445 				made by	addrs?	fp?	sum?
32447 	AIX TOC			2	crt0	as	Y	option	option
32448 	AIX minimal TOC		30	prolog	gcc	Y	Y	option
32449 	SVR4 SDATA		13	crt0	gcc	N	Y	N
32450 	SVR4 pic		30	prolog	ld	Y	not yet	N
32451 	SVR4 PIC		30	prolog	gcc	Y	option	option
32452 	EABI TOC		30	prolog	gcc	Y	option	option
32456 /* Hash functions for the hash table. */
32458 static unsigned
32459 rs6000_hash_constant (rtx k)
32461 enum rtx_code code = GET_CODE (k);
32462 machine_mode mode = GET_MODE (k);
32463 unsigned result = (code << 3) ^ mode;
32464 const char *format;
32465 int flen, fidx;
32467 format = GET_RTX_FORMAT (code);
32468 flen = strlen (format);
32469 fidx = 0;
32471 switch (code)
32473 case LABEL_REF:
32474 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
32476 case CONST_WIDE_INT:
32478 int i;
32479 flen = CONST_WIDE_INT_NUNITS (k);
32480 for (i = 0; i < flen; i++)
32481 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
32482 return result;
32485 case CONST_DOUBLE:
32486 if (mode != VOIDmode)
32487 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
32488 flen = 2;
32489 break;
32491 case CODE_LABEL:
32492 fidx = 3;
32493 break;
32495 default:
32496 break;
32499 for (; fidx < flen; fidx++)
32500 switch (format[fidx])
32502 case 's':
32504 unsigned i, len;
32505 const char *str = XSTR (k, fidx);
32506 len = strlen (str);
32507 result = result * 613 + len;
32508 for (i = 0; i < len; i++)
32509 result = result * 613 + (unsigned) str[i];
32510 break;
32512 case 'u':
32513 case 'e':
32514 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
32515 break;
32516 case 'i':
32517 case 'n':
32518 result = result * 613 + (unsigned) XINT (k, fidx);
32519 break;
32520 case 'w':
32521 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
32522 result = result * 613 + (unsigned) XWINT (k, fidx);
32523 else
32525 size_t i;
32526 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
32527 result = result * 613 + (unsigned) (XWINT (k, fidx)
32528 >> CHAR_BIT * i);
32530 break;
32531 case '0':
32532 break;
32533 default:
32534 gcc_unreachable ();
32537 return result;
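/* The hash above is a simple multiplicative scheme: each component is
   folded in as RESULT = RESULT * K + C, with K = 613 for scalar fields
   and K = 1231 for LABEL_REFs and nested expressions.  A minimal
   self-contained sketch of the same scheme over a byte string
   (illustration only; not used by the compiler):  */
#if 0
#include <stdio.h>

static unsigned
mult_hash (const char *s)
{
  unsigned result = 0;
  /* Wraps modulo 2^32 on hosts where `unsigned' is 32 bits.  */
  for (; *s; s++)
    result = result * 613 + (unsigned char) *s;
  return result;
}

int
main (void)
{
  printf ("%u\n", mult_hash ("toc"));
  return 0;
}
#endif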
32540 hashval_t
32541 toc_hasher::hash (toc_hash_struct *thc)
32543 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
32546 /* Compare H1 and H2 for equivalence. */
32548 bool
32549 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
32551 rtx r1 = h1->key;
32552 rtx r2 = h2->key;
32554 if (h1->key_mode != h2->key_mode)
32555 return 0;
32557 return rtx_equal_p (r1, r2);
32560 /* These are the names given by the C++ front-end to vtables, and
32561 vtable-like objects. Ideally, this logic should not be here;
32562 instead, there should be some programmatic way of inquiring as
32563 to whether or not an object is a vtable. */
32565 #define VTABLE_NAME_P(NAME) \
32566 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
32567 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
32568 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
32569 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
32570 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
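/* A quick self-check of the patterns above, using Itanium C++ ABI
   manglings of the kind g++ produces (sketch only; assumes the macro
   definition above is visible):  */
#if 0
#include <assert.h>
#include <string.h>

int
main (void)
{
  const char *name = "_ZTV1A";		/* vtable for class A */
  assert (VTABLE_NAME_P (name));
  name = "_ZTI1A";			/* typeinfo for class A */
  assert (VTABLE_NAME_P (name));
  name = "printf";			/* an ordinary symbol */
  assert (!VTABLE_NAME_P (name));
  return 0;
}
#endif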
32572 #ifdef NO_DOLLAR_IN_LABEL
32573 /* Return a GGC-allocated character string translating dollar signs in
32574 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
32576 const char *
32577 rs6000_xcoff_strip_dollar (const char *name)
32579 char *strip, *p;
32580 const char *q;
32581 size_t len;
32583 q = (const char *) strchr (name, '$');
32585 if (q == 0 || q == name)
32586 return name;
32588 len = strlen (name);
32589 strip = XALLOCAVEC (char, len + 1);
32590 strcpy (strip, name);
32591 p = strip + (q - name);
32592 while (p)
32594 *p = '_';
32595 p = strchr (p + 1, '$');
32598 return ggc_alloc_string (strip, len);
32600 #endif
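/* Example: rs6000_xcoff_strip_dollar ("foo$bar$baz") returns
   "foo_bar_baz"; a name without '$', or whose first '$' is its first
   character, is returned unchanged.  */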
32602 void
32603 rs6000_output_symbol_ref (FILE *file, rtx x)
32605 const char *name = XSTR (x, 0);
32607 /* Currently C++ toc references to vtables can be emitted before it
32608 is decided whether the vtable is public or private. If this is
32609 the case, then the linker will eventually complain that there is
32610 a reference to an unknown section. Thus, for vtables only,
32611 we emit the TOC reference to reference the identifier and not the
32612 symbol. */
32613 if (VTABLE_NAME_P (name))
32615 RS6000_OUTPUT_BASENAME (file, name);
32617 else
32618 assemble_name (file, name);
32621 /* Output a TOC entry. We derive the entry name from what is being
32622 written. */
32624 void
32625 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
32627 char buf[256];
32628 const char *name = buf;
32629 rtx base = x;
32630 HOST_WIDE_INT offset = 0;
32632 gcc_assert (!TARGET_NO_TOC);
32634 /* When the linker won't eliminate them, don't output duplicate
32635 TOC entries (this happens on AIX if there is any kind of TOC,
32636 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
32637 CODE_LABELs. */
32638 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
32640 struct toc_hash_struct *h;
32642 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
32643 time because GGC is not initialized at that point. */
32644 if (toc_hash_table == NULL)
32645 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
32647 h = ggc_alloc<toc_hash_struct> ();
32648 h->key = x;
32649 h->key_mode = mode;
32650 h->labelno = labelno;
32652 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
32653 if (*found == NULL)
32654 *found = h;
32655 else /* This is indeed a duplicate.
32656 Set this label equal to that label. */
32658 fputs ("\t.set ", file);
32659 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
32660 fprintf (file, "%d,", labelno);
32661 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
32662 fprintf (file, "%d\n", ((*found)->labelno));
32664 #ifdef HAVE_AS_TLS
32665 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
32666 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
32667 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
32669 fputs ("\t.set ", file);
32670 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
32671 fprintf (file, "%d,", labelno);
32672 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
32673 fprintf (file, "%d\n", ((*found)->labelno));
32675 #endif
32676 return;
32680 /* If we're going to put a double constant in the TOC, make sure it's
32681 aligned properly when strict alignment is on. */
32682 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
32683 && STRICT_ALIGNMENT
32684 && GET_MODE_BITSIZE (mode) >= 64
32685 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
32686 ASM_OUTPUT_ALIGN (file, 3);
32689 (*targetm.asm_out.internal_label) (file, "LC", labelno);
32691 /* Handle FP constants specially. Note that if we have a minimal
32692 TOC, things we put here aren't actually in the TOC, so we can allow
32693 FP constants. */
32694 if (GET_CODE (x) == CONST_DOUBLE &&
32695 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
32696 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
32698 long k[4];
32700 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32701 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
32702 else
32703 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
32705 if (TARGET_64BIT)
32707 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32708 fputs (DOUBLE_INT_ASM_OP, file);
32709 else
32710 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
32711 k[0] & 0xffffffff, k[1] & 0xffffffff,
32712 k[2] & 0xffffffff, k[3] & 0xffffffff);
32713 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
32714 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
32715 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
32716 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
32717 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
32718 return;
32720 else
32722 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32723 fputs ("\t.long ", file);
32724 else
32725 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
32726 k[0] & 0xffffffff, k[1] & 0xffffffff,
32727 k[2] & 0xffffffff, k[3] & 0xffffffff);
32728 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
32729 k[0] & 0xffffffff, k[1] & 0xffffffff,
32730 k[2] & 0xffffffff, k[3] & 0xffffffff);
32731 return;
32734 else if (GET_CODE (x) == CONST_DOUBLE &&
32735 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
32737 long k[2];
32739 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32740 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
32741 else
32742 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
32744 if (TARGET_64BIT)
32746 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32747 fputs (DOUBLE_INT_ASM_OP, file);
32748 else
32749 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
32750 k[0] & 0xffffffff, k[1] & 0xffffffff);
32751 fprintf (file, "0x%lx%08lx\n",
32752 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
32753 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
32754 return;
32756 else
32758 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32759 fputs ("\t.long ", file);
32760 else
32761 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
32762 k[0] & 0xffffffff, k[1] & 0xffffffff);
32763 fprintf (file, "0x%lx,0x%lx\n",
32764 k[0] & 0xffffffff, k[1] & 0xffffffff);
32765 return;
32768 else if (GET_CODE (x) == CONST_DOUBLE &&
32769 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
32771 long l;
32773 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32774 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
32775 else
32776 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
32778 if (TARGET_64BIT)
32780 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32781 fputs (DOUBLE_INT_ASM_OP, file);
32782 else
32783 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
32784 if (WORDS_BIG_ENDIAN)
32785 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
32786 else
32787 fprintf (file, "0x%lx\n", l & 0xffffffff);
32788 return;
32790 else
32792 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32793 fputs ("\t.long ", file);
32794 else
32795 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
32796 fprintf (file, "0x%lx\n", l & 0xffffffff);
32797 return;
32800 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
32802 unsigned HOST_WIDE_INT low;
32803 HOST_WIDE_INT high;
32805 low = INTVAL (x) & 0xffffffff;
32806 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
32808 /* TOC entries are always Pmode-sized, so on big-endian targets
32809 smaller integer constants in the TOC need to be padded.
32810 (This is still a win over putting the constants in
32811 a separate constant pool, because then we'd have
32812 to have both a TOC entry _and_ the actual constant.)
32814 For a 32-bit target, CONST_INT values are loaded and shifted
32815 entirely within `low' and can be stored in one TOC entry. */
32817 /* It would be easy to make this work, but it doesn't now. */
32818 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
32820 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
32822 low |= high << 32;
32823 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
32824 high = (HOST_WIDE_INT) low >> 32;
32825 low &= 0xffffffff;
32828 if (TARGET_64BIT)
32830 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32831 fputs (DOUBLE_INT_ASM_OP, file);
32832 else
32833 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
32834 (long) high & 0xffffffff, (long) low & 0xffffffff);
32835 fprintf (file, "0x%lx%08lx\n",
32836 (long) high & 0xffffffff, (long) low & 0xffffffff);
32837 return;
32839 else
32841 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
32843 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32844 fputs ("\t.long ", file);
32845 else
32846 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
32847 (long) high & 0xffffffff, (long) low & 0xffffffff);
32848 fprintf (file, "0x%lx,0x%lx\n",
32849 (long) high & 0xffffffff, (long) low & 0xffffffff);
32851 else
32853 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32854 fputs ("\t.long ", file);
32855 else
32856 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
32857 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
32859 return;
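/* Worked example of the padding above: a 32-bit (SImode) constant
   0x12345678 placed in a 64-bit big-endian TOC is shifted into the
   high half of the Pmode-sized entry.  A self-contained model of the
   arithmetic (hypothetical values, sketch only):  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long long low = 0x12345678u;  /* INTVAL (x) & 0xffffffff */
  long long high = 0;			 /* sign-extension bits */
  int pointer_size = 64, mode_bits = 32; /* 64-bit big-endian case */

  unsigned long long v = (low | ((unsigned long long) high << 32))
			 << (pointer_size - mode_bits);
  printf ("0x%llx%08llx\n", (v >> 32) & 0xffffffff, v & 0xffffffff);
  /* Prints 0x1234567800000000, matching the TOC entry emitted.  */
  return 0;
}
#endif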
32863 if (GET_CODE (x) == CONST)
32865 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
32866 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
32868 base = XEXP (XEXP (x, 0), 0);
32869 offset = INTVAL (XEXP (XEXP (x, 0), 1));
32872 switch (GET_CODE (base))
32874 case SYMBOL_REF:
32875 name = XSTR (base, 0);
32876 break;
32878 case LABEL_REF:
32879 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
32880 CODE_LABEL_NUMBER (XEXP (base, 0)));
32881 break;
32883 case CODE_LABEL:
32884 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
32885 break;
32887 default:
32888 gcc_unreachable ();
32891 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32892 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
32893 else
32895 fputs ("\t.tc ", file);
32896 RS6000_OUTPUT_BASENAME (file, name);
32898 if (offset < 0)
32899 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
32900 else if (offset)
32901 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
32903 /* Mark large TOC symbols on AIX with [TE] so they are mapped
32904 after other TOC symbols, reducing overflow of small TOC access
32905 to [TC] symbols. */
32906 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
32907 ? "[TE]," : "[TC],", file);
32910 /* Currently C++ toc references to vtables can be emitted before it
32911 is decided whether the vtable is public or private. If this is
32912 the case, then the linker will eventually complain that there is
32913 a TOC reference to an unknown section. Thus, for vtables only,
32914 we emit the TOC reference to reference the symbol and not the
32915 section. */
32916 if (VTABLE_NAME_P (name))
32918 RS6000_OUTPUT_BASENAME (file, name);
32919 if (offset < 0)
32920 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
32921 else if (offset > 0)
32922 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
32924 else
32925 output_addr_const (file, x);
32927 #if HAVE_AS_TLS
32928 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
32930 switch (SYMBOL_REF_TLS_MODEL (base))
32932 case 0:
32933 break;
32934 case TLS_MODEL_LOCAL_EXEC:
32935 fputs ("@le", file);
32936 break;
32937 case TLS_MODEL_INITIAL_EXEC:
32938 fputs ("@ie", file);
32939 break;
32940 /* Use global-dynamic for local-dynamic. */
32941 case TLS_MODEL_GLOBAL_DYNAMIC:
32942 case TLS_MODEL_LOCAL_DYNAMIC:
32943 putc ('\n', file);
32944 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
32945 fputs ("\t.tc .", file);
32946 RS6000_OUTPUT_BASENAME (file, name);
32947 fputs ("[TC],", file);
32948 output_addr_const (file, x);
32949 fputs ("@m", file);
32950 break;
32951 default:
32952 gcc_unreachable ();
32955 #endif
32957 putc ('\n', file);
32960 /* Output an assembler pseudo-op to write an ASCII string of N characters
32961 starting at P to FILE.
32963 On the RS/6000, we have to do this using the .byte operation and
32964 write out special characters outside the quoted string.
32965 Also, the assembler is broken; very long strings are truncated,
32966 so we must artificially break them up early. */
32968 void
32969 output_ascii (FILE *file, const char *p, int n)
32971 char c;
32972 int i, count_string;
32973 const char *for_string = "\t.byte \"";
32974 const char *for_decimal = "\t.byte ";
32975 const char *to_close = NULL;
32977 count_string = 0;
32978 for (i = 0; i < n; i++)
32980 c = *p++;
32981 if (c >= ' ' && c < 0177)
32983 if (for_string)
32984 fputs (for_string, file);
32985 putc (c, file);
32987 /* Write two quotes to get one. */
32988 if (c == '"')
32990 putc (c, file);
32991 ++count_string;
32994 for_string = NULL;
32995 for_decimal = "\"\n\t.byte ";
32996 to_close = "\"\n";
32997 ++count_string;
32999 if (count_string >= 512)
33001 fputs (to_close, file);
33003 for_string = "\t.byte \"";
33004 for_decimal = "\t.byte ";
33005 to_close = NULL;
33006 count_string = 0;
33009 else
33011 if (for_decimal)
33012 fputs (for_decimal, file);
33013 fprintf (file, "%d", c);
33015 for_string = "\n\t.byte \"";
33016 for_decimal = ", ";
33017 to_close = "\n";
33018 count_string = 0;
33022 /* Now close the string if we have written one. Then end the line. */
33023 if (to_close)
33024 fputs (to_close, file);
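/* Example: for P = "ab\"c\n" and N = 6 (including the trailing NUL),
   the loop above emits

	.byte "ab""c"
	.byte 10, 0

   Printable runs become one quoted .byte directive, with '"' doubled
   to escape it; unprintable bytes are emitted in decimal.  */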
33027 /* Generate a unique section name for FILENAME for a section type
33028 represented by SECTION_DESC. Output goes into BUF.
33030 SECTION_DESC can be any string, as long as it is different for each
33031 possible section type.
33033 We name the section in the same manner as xlc. The name begins with an
33034 underscore followed by the filename (after stripping any leading directory
33035 names) with the last period replaced by the string SECTION_DESC. If
33036 FILENAME does not contain a period, SECTION_DESC is appended to the end of
33037 the name. */
33039 void
33040 rs6000_gen_section_name (char **buf, const char *filename,
33041 const char *section_desc)
33043 const char *q, *after_last_slash, *last_period = 0;
33044 char *p;
33045 int len;
33047 after_last_slash = filename;
33048 for (q = filename; *q; q++)
33050 if (*q == '/')
33051 after_last_slash = q + 1, last_period = 0; /* a '.' in a directory name must not count */
33052 else if (*q == '.')
33053 last_period = q;
33056 len = strlen (after_last_slash) + strlen (section_desc) + 2;
33057 *buf = (char *) xmalloc (len);
33059 p = *buf;
33060 *p++ = '_';
33062 for (q = after_last_slash; *q; q++)
33064 if (q == last_period)
33066 strcpy (p, section_desc);
33067 p += strlen (section_desc);
33068 break;
33071 else if (ISALNUM (*q))
33072 *p++ = *q;
33075 if (last_period == 0)
33076 strcpy (p, section_desc);
33077 else
33078 *p = '\0';
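/* Example: with a hypothetical descriptor "_bss_",
   rs6000_gen_section_name (&buf, "../src/foo.c", "_bss_") produces
   "_foo_bss_": directories are stripped, non-alphanumeric characters
   are dropped, and the last period plus everything after it is
   replaced by SECTION_DESC.  For a period-free "README" the
   descriptor is simply appended, giving "_README_bss_".  */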
33081 /* Emit profile function. */
33083 void
33084 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
33086 /* Non-standard profiling for kernels, which just saves LR then calls
33087 _mcount without worrying about arg saves. The idea is to change
33088 the function prologue as little as possible as it isn't easy to
33089 account for arg save/restore code added just for _mcount. */
33090 if (TARGET_PROFILE_KERNEL)
33091 return;
33093 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33095 #ifndef NO_PROFILE_COUNTERS
33096 # define NO_PROFILE_COUNTERS 0
33097 #endif
33098 if (NO_PROFILE_COUNTERS)
33099 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
33100 LCT_NORMAL, VOIDmode, 0);
33101 else
33103 char buf[30];
33104 const char *label_name;
33105 rtx fun;
33107 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
33108 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
33109 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
33111 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
33112 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
33115 else if (DEFAULT_ABI == ABI_DARWIN)
33117 const char *mcount_name = RS6000_MCOUNT;
33118 int caller_addr_regno = LR_REGNO;
33120 /* Be conservative and always set this, at least for now. */
33121 crtl->uses_pic_offset_table = 1;
33123 #if TARGET_MACHO
33124 /* For PIC code, set up a stub and collect the caller's address
33125 from r0, which is where the prologue puts it. */
33126 if (MACHOPIC_INDIRECT
33127 && crtl->uses_pic_offset_table)
33128 caller_addr_regno = 0;
33129 #endif
33130 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
33131 LCT_NORMAL, VOIDmode, 1,
33132 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
33136 /* Write function profiler code. */
33138 void
33139 output_function_profiler (FILE *file, int labelno)
33141 char buf[100];
33143 switch (DEFAULT_ABI)
33145 default:
33146 gcc_unreachable ();
33148 case ABI_V4:
33149 if (!TARGET_32BIT)
33151 warning (0, "no profiling of 64-bit code for this ABI");
33152 return;
33154 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
33155 fprintf (file, "\tmflr %s\n", reg_names[0]);
33156 if (NO_PROFILE_COUNTERS)
33158 asm_fprintf (file, "\tstw %s,4(%s)\n",
33159 reg_names[0], reg_names[1]);
33161 else if (TARGET_SECURE_PLT && flag_pic)
33163 if (TARGET_LINK_STACK)
33165 char name[32];
33166 get_ppc476_thunk_name (name);
33167 asm_fprintf (file, "\tbl %s\n", name);
33169 else
33170 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
33171 asm_fprintf (file, "\tstw %s,4(%s)\n",
33172 reg_names[0], reg_names[1]);
33173 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
33174 asm_fprintf (file, "\taddis %s,%s,",
33175 reg_names[12], reg_names[12]);
33176 assemble_name (file, buf);
33177 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
33178 assemble_name (file, buf);
33179 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
33181 else if (flag_pic == 1)
33183 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
33184 asm_fprintf (file, "\tstw %s,4(%s)\n",
33185 reg_names[0], reg_names[1]);
33186 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
33187 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
33188 assemble_name (file, buf);
33189 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
33191 else if (flag_pic > 1)
33193 asm_fprintf (file, "\tstw %s,4(%s)\n",
33194 reg_names[0], reg_names[1]);
33195 /* Now, we need to get the address of the label. */
33196 if (TARGET_LINK_STACK)
33198 char name[32];
33199 get_ppc476_thunk_name (name);
33200 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
33201 assemble_name (file, buf);
33202 fputs ("-.\n1:", file);
33203 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
33204 asm_fprintf (file, "\taddi %s,%s,4\n",
33205 reg_names[11], reg_names[11]);
33207 else
33209 fputs ("\tbcl 20,31,1f\n\t.long ", file);
33210 assemble_name (file, buf);
33211 fputs ("-.\n1:", file);
33212 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
33214 asm_fprintf (file, "\tlwz %s,0(%s)\n",
33215 reg_names[0], reg_names[11]);
33216 asm_fprintf (file, "\tadd %s,%s,%s\n",
33217 reg_names[0], reg_names[0], reg_names[11]);
33219 else
33221 asm_fprintf (file, "\tlis %s,", reg_names[12]);
33222 assemble_name (file, buf);
33223 fputs ("@ha\n", file);
33224 asm_fprintf (file, "\tstw %s,4(%s)\n",
33225 reg_names[0], reg_names[1]);
33226 asm_fprintf (file, "\tla %s,", reg_names[0]);
33227 assemble_name (file, buf);
33228 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
33231 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
33232 fprintf (file, "\tbl %s%s\n",
33233 RS6000_MCOUNT, flag_pic ? "@plt" : "");
33234 break;
33236 case ABI_AIX:
33237 case ABI_ELFv2:
33238 case ABI_DARWIN:
33239 /* Don't do anything, done in output_profile_hook (). */
33240 break;
33246 /* The following variable value is the last issued insn. */
33248 static rtx_insn *last_scheduled_insn;
33250 /* The following variable helps to balance issuing of load and
33251 store instructions */
33253 static int load_store_pendulum;
33255 /* The following variable helps pair divide insns during scheduling. */
33256 static int divide_cnt;
33257 /* The following variable helps pair and alternate vector and vector load
33258 insns during scheduling. */
33259 static int vec_pairing;
33262 /* Power4 load update and store update instructions are cracked into a
33263 load or store and an integer insn which are executed in the same cycle.
33264 Branches have their own dispatch slot which does not count against the
33265 GCC issue rate, but it changes the program flow so there are no other
33266 instructions to issue in this cycle. */
33268 static int
33269 rs6000_variable_issue_1 (rtx_insn *insn, int more)
33271 last_scheduled_insn = insn;
33272 if (GET_CODE (PATTERN (insn)) == USE
33273 || GET_CODE (PATTERN (insn)) == CLOBBER)
33275 cached_can_issue_more = more;
33276 return cached_can_issue_more;
33279 if (insn_terminates_group_p (insn, current_group))
33281 cached_can_issue_more = 0;
33282 return cached_can_issue_more;
33285 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
33286 if (recog_memoized (insn) < 0)
33287 return more;
33289 if (rs6000_sched_groups)
33291 if (is_microcoded_insn (insn))
33292 cached_can_issue_more = 0;
33293 else if (is_cracked_insn (insn))
33294 cached_can_issue_more = more > 2 ? more - 2 : 0;
33295 else
33296 cached_can_issue_more = more - 1;
33298 return cached_can_issue_more;
33301 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
33302 return 0;
33304 cached_can_issue_more = more - 1;
33305 return cached_can_issue_more;
33308 static int
33309 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
33311 int r = rs6000_variable_issue_1 (insn, more);
33312 if (verbose)
33313 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
33314 return r;
33317 /* Adjust the cost of a scheduling dependency. Return the new cost of
33318 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
33320 static int
33321 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
33322 unsigned int)
33324 enum attr_type attr_type;
33326 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
33327 return cost;
33329 switch (dep_type)
33331 case REG_DEP_TRUE:
33333 /* Data dependency; DEP_INSN writes a register that INSN reads
33334 some cycles later. */
33336 /* Separate a load from a narrower, dependent store. */
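/* (For example, an 8-byte ld that overlaps a preceding 4-byte stw
   cannot be satisfied by store forwarding and stalls until the store
   drains, hence the large extra cost below.)  */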
33337 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
33338 && GET_CODE (PATTERN (insn)) == SET
33339 && GET_CODE (PATTERN (dep_insn)) == SET
33340 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
33341 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
33342 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
33343 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
33344 return cost + 14;
33346 attr_type = get_attr_type (insn);
33348 switch (attr_type)
33350 case TYPE_JMPREG:
33351 /* Tell the first scheduling pass about the latency between
33352 a mtctr and bctr (and mtlr and br/blr). The first
33353 scheduling pass will not know about this latency since
33354 the mtctr instruction, which has the latency associated
33355 to it, will be generated by reload. */
33356 return 4;
33357 case TYPE_BRANCH:
33358 /* Leave some extra cycles between a compare and its
33359 dependent branch, to inhibit expensive mispredicts. */
33360 if ((rs6000_cpu_attr == CPU_PPC603
33361 || rs6000_cpu_attr == CPU_PPC604
33362 || rs6000_cpu_attr == CPU_PPC604E
33363 || rs6000_cpu_attr == CPU_PPC620
33364 || rs6000_cpu_attr == CPU_PPC630
33365 || rs6000_cpu_attr == CPU_PPC750
33366 || rs6000_cpu_attr == CPU_PPC7400
33367 || rs6000_cpu_attr == CPU_PPC7450
33368 || rs6000_cpu_attr == CPU_PPCE5500
33369 || rs6000_cpu_attr == CPU_PPCE6500
33370 || rs6000_cpu_attr == CPU_POWER4
33371 || rs6000_cpu_attr == CPU_POWER5
33372 || rs6000_cpu_attr == CPU_POWER7
33373 || rs6000_cpu_attr == CPU_POWER8
33374 || rs6000_cpu_attr == CPU_POWER9
33375 || rs6000_cpu_attr == CPU_CELL)
33376 && recog_memoized (dep_insn)
33377 && (INSN_CODE (dep_insn) >= 0))
33379 switch (get_attr_type (dep_insn))
33381 case TYPE_CMP:
33382 case TYPE_FPCOMPARE:
33383 case TYPE_CR_LOGICAL:
33384 case TYPE_DELAYED_CR:
33385 return cost + 2;
33386 case TYPE_EXTS:
33387 case TYPE_MUL:
33388 if (get_attr_dot (dep_insn) == DOT_YES)
33389 return cost + 2;
33390 else
33391 break;
33392 case TYPE_SHIFT:
33393 if (get_attr_dot (dep_insn) == DOT_YES
33394 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
33395 return cost + 2;
33396 else
33397 break;
33398 default:
33399 break;
33401 break;
33403 case TYPE_STORE:
33404 case TYPE_FPSTORE:
33405 if ((rs6000_cpu == PROCESSOR_POWER6)
33406 && recog_memoized (dep_insn)
33407 && (INSN_CODE (dep_insn) >= 0))
33410 if (GET_CODE (PATTERN (insn)) != SET)
33411 /* If this happens, we have to extend this to schedule
33412 optimally. Return default for now. */
33413 return cost;
33415 /* Adjust the cost for the case where the value written
33416 by a fixed point operation is used as the address
33417 gen value on a store. */
33418 switch (get_attr_type (dep_insn))
33420 case TYPE_LOAD:
33421 case TYPE_CNTLZ:
33423 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33424 return get_attr_sign_extend (dep_insn)
33425 == SIGN_EXTEND_YES ? 6 : 4;
33426 break;
33428 case TYPE_SHIFT:
33430 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33431 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
33432 6 : 3;
33433 break;
33435 case TYPE_INTEGER:
33436 case TYPE_ADD:
33437 case TYPE_LOGICAL:
33438 case TYPE_EXTS:
33439 case TYPE_INSERT:
33441 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33442 return 3;
33443 break;
33445 case TYPE_STORE:
33446 case TYPE_FPLOAD:
33447 case TYPE_FPSTORE:
33449 if (get_attr_update (dep_insn) == UPDATE_YES
33450 && ! rs6000_store_data_bypass_p (dep_insn, insn))
33451 return 3;
33452 break;
33454 case TYPE_MUL:
33456 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33457 return 17;
33458 break;
33460 case TYPE_DIV:
33462 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33463 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
33464 break;
33466 default:
33467 break;
33470 break;
33472 case TYPE_LOAD:
33473 if ((rs6000_cpu == PROCESSOR_POWER6)
33474 && recog_memoized (dep_insn)
33475 && (INSN_CODE (dep_insn) >= 0))
33478 /* Adjust the cost for the case where the value written
33479 by a fixed point instruction is used within the address
33480 gen portion of a subsequent load(u)(x). */
33481 switch (get_attr_type (dep_insn))
33483 case TYPE_LOAD:
33484 case TYPE_CNTLZ:
33486 if (set_to_load_agen (dep_insn, insn))
33487 return get_attr_sign_extend (dep_insn)
33488 == SIGN_EXTEND_YES ? 6 : 4;
33489 break;
33491 case TYPE_SHIFT:
33493 if (set_to_load_agen (dep_insn, insn))
33494 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
33495 6 : 3;
33496 break;
33498 case TYPE_INTEGER:
33499 case TYPE_ADD:
33500 case TYPE_LOGICAL:
33501 case TYPE_EXTS:
33502 case TYPE_INSERT:
33504 if (set_to_load_agen (dep_insn, insn))
33505 return 3;
33506 break;
33508 case TYPE_STORE:
33509 case TYPE_FPLOAD:
33510 case TYPE_FPSTORE:
33512 if (get_attr_update (dep_insn) == UPDATE_YES
33513 && set_to_load_agen (dep_insn, insn))
33514 return 3;
33515 break;
33517 case TYPE_MUL:
33519 if (set_to_load_agen (dep_insn, insn))
33520 return 17;
33521 break;
33523 case TYPE_DIV:
33525 if (set_to_load_agen (dep_insn, insn))
33526 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
33527 break;
33529 default:
33530 break;
33533 break;
33535 case TYPE_FPLOAD:
33536 if ((rs6000_cpu == PROCESSOR_POWER6)
33537 && get_attr_update (insn) == UPDATE_NO
33538 && recog_memoized (dep_insn)
33539 && (INSN_CODE (dep_insn) >= 0)
33540 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
33541 return 2;
33543 default:
33544 break;
33547 /* Fall out to return default cost. */
33549 break;
33551 case REG_DEP_OUTPUT:
33552 /* Output dependency; DEP_INSN writes a register that INSN writes some
33553 cycles later. */
33554 if ((rs6000_cpu == PROCESSOR_POWER6)
33555 && recog_memoized (dep_insn)
33556 && (INSN_CODE (dep_insn) >= 0))
33558 attr_type = get_attr_type (insn);
33560 switch (attr_type)
33562 case TYPE_FP:
33563 case TYPE_FPSIMPLE:
33564 if (get_attr_type (dep_insn) == TYPE_FP
33565 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
33566 return 1;
33567 break;
33568 case TYPE_FPLOAD:
33569 if (get_attr_update (insn) == UPDATE_NO
33570 && get_attr_type (dep_insn) == TYPE_MFFGPR)
33571 return 2;
33572 break;
33573 default:
33574 break;
33577 /* Fall through, no cost for output dependency. */
33578 /* FALLTHRU */
33580 case REG_DEP_ANTI:
33581 /* Anti dependency; DEP_INSN reads a register that INSN writes some
33582 cycles later. */
33583 return 0;
33585 default:
33586 gcc_unreachable ();
33589 return cost;
33592 /* Debug version of rs6000_adjust_cost. */
33594 static int
33595 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
33596 int cost, unsigned int dw)
33598 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
33600 if (ret != cost)
33602 const char *dep;
33604 switch (dep_type)
33606 default: dep = "unknown dependency"; break;
33607 case REG_DEP_TRUE: dep = "data dependency"; break;
33608 case REG_DEP_OUTPUT: dep = "output dependency"; break;
33609 case REG_DEP_ANTI: dep = "anti dependency"; break;
33612 fprintf (stderr,
33613 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
33614 "%s, insn:\n", ret, cost, dep);
33616 debug_rtx (insn);
33619 return ret;
33622 /* The function returns true if INSN is microcoded.
33623 Return false otherwise. */
33625 static bool
33626 is_microcoded_insn (rtx_insn *insn)
33628 if (!insn || !NONDEBUG_INSN_P (insn)
33629 || GET_CODE (PATTERN (insn)) == USE
33630 || GET_CODE (PATTERN (insn)) == CLOBBER)
33631 return false;
33633 if (rs6000_cpu_attr == CPU_CELL)
33634 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
33636 if (rs6000_sched_groups
33637 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
33639 enum attr_type type = get_attr_type (insn);
33640 if ((type == TYPE_LOAD
33641 && get_attr_update (insn) == UPDATE_YES
33642 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
33643 || ((type == TYPE_LOAD || type == TYPE_STORE)
33644 && get_attr_update (insn) == UPDATE_YES
33645 && get_attr_indexed (insn) == INDEXED_YES)
33646 || type == TYPE_MFCR)
33647 return true;
33650 return false;
33653 /* The function returns true if INSN is cracked into 2 instructions
33654 by the processor (and therefore occupies 2 issue slots). */
33656 static bool
33657 is_cracked_insn (rtx_insn *insn)
33659 if (!insn || !NONDEBUG_INSN_P (insn)
33660 || GET_CODE (PATTERN (insn)) == USE
33661 || GET_CODE (PATTERN (insn)) == CLOBBER)
33662 return false;
33664 if (rs6000_sched_groups
33665 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
33667 enum attr_type type = get_attr_type (insn);
33668 if ((type == TYPE_LOAD
33669 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
33670 && get_attr_update (insn) == UPDATE_NO)
33671 || (type == TYPE_LOAD
33672 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
33673 && get_attr_update (insn) == UPDATE_YES
33674 && get_attr_indexed (insn) == INDEXED_NO)
33675 || (type == TYPE_STORE
33676 && get_attr_update (insn) == UPDATE_YES
33677 && get_attr_indexed (insn) == INDEXED_NO)
33678 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
33679 && get_attr_update (insn) == UPDATE_YES)
33680 || type == TYPE_DELAYED_CR
33681 || (type == TYPE_EXTS
33682 && get_attr_dot (insn) == DOT_YES)
33683 || (type == TYPE_SHIFT
33684 && get_attr_dot (insn) == DOT_YES
33685 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
33686 || (type == TYPE_MUL
33687 && get_attr_dot (insn) == DOT_YES)
33688 || type == TYPE_DIV
33689 || (type == TYPE_INSERT
33690 && get_attr_size (insn) == SIZE_32))
33691 return true;
33694 return false;
33697 /* The function returns true if INSN can be issued only from
33698 the branch slot. */
33700 static bool
33701 is_branch_slot_insn (rtx_insn *insn)
33703 if (!insn || !NONDEBUG_INSN_P (insn)
33704 || GET_CODE (PATTERN (insn)) == USE
33705 || GET_CODE (PATTERN (insn)) == CLOBBER)
33706 return false;
33708 if (rs6000_sched_groups)
33710 enum attr_type type = get_attr_type (insn);
33711 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
33712 return true;
33713 return false;
33716 return false;
33719 /* The function returns true if OUT_INSN sets a value that is
33720 used in the address generation computation of IN_INSN. */
33721 static bool
33722 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
33724 rtx out_set, in_set;
33726 /* For performance reasons, only handle the simple case where
33727 both insns are a single_set. */
33728 out_set = single_set (out_insn);
33729 if (out_set)
33731 in_set = single_set (in_insn);
33732 if (in_set)
33733 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
33736 return false;
33739 /* Try to determine base/offset/size parts of the given MEM.
33740 Return true if successful, false if any of the values couldn't
33741 be determined.
33743 This function only looks for REG or REG+CONST address forms.
33744 REG+REG address form will return false. */
33746 static bool
33747 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
33748 HOST_WIDE_INT *size)
33750 rtx addr_rtx;
33751 if (MEM_SIZE_KNOWN_P (mem))
33752 *size = MEM_SIZE (mem);
33753 else
33754 return false;
33756 addr_rtx = (XEXP (mem, 0));
33757 if (GET_CODE (addr_rtx) == PRE_MODIFY)
33758 addr_rtx = XEXP (addr_rtx, 1);
33760 *offset = 0;
33761 while (GET_CODE (addr_rtx) == PLUS
33762 && CONST_INT_P (XEXP (addr_rtx, 1)))
33764 *offset += INTVAL (XEXP (addr_rtx, 1));
33765 addr_rtx = XEXP (addr_rtx, 0);
33767 if (!REG_P (addr_rtx))
33768 return false;
33770 *base = addr_rtx;
33771 return true;
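/* Example: for (mem:DI (plus:DI (reg:DI 3) (const_int 8)) [8 ...])
   the walk above yields *BASE = (reg:DI 3), *OFFSET = 8, *SIZE = 8.
   A PRE_MODIFY (update-form) address is looked through to its
   new-address side first, and an indexed REG+REG address fails the
   final REG_P test, returning false.  */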
33774 /* Return true if the target storage location of MEM1 is
33775 adjacent to the target storage location of MEM2. */
33778 static bool
33779 adjacent_mem_locations (rtx mem1, rtx mem2)
33781 rtx reg1, reg2;
33782 HOST_WIDE_INT off1, size1, off2, size2;
33784 if (get_memref_parts (mem1, &reg1, &off1, &size1)
33785 && get_memref_parts (mem2, &reg2, &off2, &size2))
33786 return ((REGNO (reg1) == REGNO (reg2))
33787 && ((off1 + size1 == off2)
33788 || (off2 + size2 == off1)));
33790 return false;
33793 /* This function returns true if it can be determined that the two MEM
33794 locations overlap by at least 1 byte based on base reg/offset/size. */
33796 static bool
33797 mem_locations_overlap (rtx mem1, rtx mem2)
33799 rtx reg1, reg2;
33800 HOST_WIDE_INT off1, size1, off2, size2;
33802 if (get_memref_parts (mem1, &reg1, &off1, &size1)
33803 && get_memref_parts (mem2, &reg2, &off2, &size2))
33804 return ((REGNO (reg1) == REGNO (reg2))
33805 && (((off1 <= off2) && (off1 + size1 > off2))
33806 || ((off2 <= off1) && (off2 + size2 > off1))));
33808 return false;
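/* Both predicates above reduce to interval arithmetic on
   [OFF, OFF+SIZE) once the base registers match.  A self-contained
   sketch (illustration only):  */
#if 0
#include <assert.h>

typedef long long hwi;		/* stand-in for HOST_WIDE_INT */

static int
adjacent (hwi off1, hwi size1, hwi off2, hwi size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static int
overlap (hwi off1, hwi size1, hwi off2, hwi size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}

int
main (void)
{
  assert (adjacent (0, 8, 8, 8));   /* e.g. std 0(r3) and std 8(r3) */
  assert (!adjacent (0, 8, 16, 8));
  assert (overlap (0, 8, 4, 8));    /* partial overlap */
  assert (!overlap (0, 4, 4, 4));   /* merely touching is no overlap */
  return 0;
}
#endif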
33811 /* A C statement (sans semicolon) to update the integer scheduling
33812 priority INSN_PRIORITY (INSN). Increase the priority to execute the
33813 INSN earlier, reduce the priority to execute INSN later. Do not
33814 define this macro if you do not need to adjust the scheduling
33815 priorities of insns. */
33817 static int
33818 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
33820 rtx load_mem, str_mem;
33821 /* On machines (like the 750) which have asymmetric integer units,
33822 where one integer unit can do multiplies and divides and the other
33823 can't, reduce the priority of multiply/divide so it is scheduled
33824 before other integer operations. */
33826 #if 0
33827 if (! INSN_P (insn))
33828 return priority;
33830 if (GET_CODE (PATTERN (insn)) == USE)
33831 return priority;
33833 switch (rs6000_cpu_attr) {
33834 case CPU_PPC750:
33835 switch (get_attr_type (insn))
33837 default:
33838 break;
33840 case TYPE_MUL:
33841 case TYPE_DIV:
33842 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
33843 priority, priority);
33844 if (priority >= 0 && priority < 0x01000000)
33845 priority >>= 3;
33846 break;
33849 #endif
33851 if (insn_must_be_first_in_group (insn)
33852 && reload_completed
33853 && current_sched_info->sched_max_insns_priority
33854 && rs6000_sched_restricted_insns_priority)
33857 /* Prioritize insns that can be dispatched only in the first
33858 dispatch slot. */
33859 if (rs6000_sched_restricted_insns_priority == 1)
33860 /* Attach highest priority to insn. This means that in
33861 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
33862 precede 'priority' (critical path) considerations. */
33863 return current_sched_info->sched_max_insns_priority;
33864 else if (rs6000_sched_restricted_insns_priority == 2)
33865 /* Increase priority of insn by a minimal amount. This means that in
33866 haifa-sched.c:ready_sort(), only 'priority' (critical path)
33867 considerations precede dispatch-slot restriction considerations. */
33868 return (priority + 1);
33871 if (rs6000_cpu == PROCESSOR_POWER6
33872 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
33873 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
33874 /* Attach highest priority to insn if the scheduler has just issued two
33875 stores and this instruction is a load, or two loads and this instruction
33876 is a store. Power6 wants loads and stores scheduled alternately
33877 when possible */
33878 return current_sched_info->sched_max_insns_priority;
33880 return priority;
33883 /* Return true if the instruction is nonpipelined on the Cell. */
33884 static bool
33885 is_nonpipeline_insn (rtx_insn *insn)
33887 enum attr_type type;
33888 if (!insn || !NONDEBUG_INSN_P (insn)
33889 || GET_CODE (PATTERN (insn)) == USE
33890 || GET_CODE (PATTERN (insn)) == CLOBBER)
33891 return false;
33893 type = get_attr_type (insn);
33894 if (type == TYPE_MUL
33895 || type == TYPE_DIV
33896 || type == TYPE_SDIV
33897 || type == TYPE_DDIV
33898 || type == TYPE_SSQRT
33899 || type == TYPE_DSQRT
33900 || type == TYPE_MFCR
33901 || type == TYPE_MFCRF
33902 || type == TYPE_MFJMPR)
33904 return true;
33906 return false;
33910 /* Return how many instructions the machine can issue per cycle. */
33912 static int
33913 rs6000_issue_rate (void)
33915 /* Unless scheduling for register pressure, use issue rate of 1 for
33916 first scheduling pass to decrease degradation. */
33917 if (!reload_completed && !flag_sched_pressure)
33918 return 1;
33920 switch (rs6000_cpu_attr) {
33921 case CPU_RS64A:
33922 case CPU_PPC601: /* ? */
33923 case CPU_PPC7450:
33924 return 3;
33925 case CPU_PPC440:
33926 case CPU_PPC603:
33927 case CPU_PPC750:
33928 case CPU_PPC7400:
33929 case CPU_PPC8540:
33930 case CPU_PPC8548:
33931 case CPU_CELL:
33932 case CPU_PPCE300C2:
33933 case CPU_PPCE300C3:
33934 case CPU_PPCE500MC:
33935 case CPU_PPCE500MC64:
33936 case CPU_PPCE5500:
33937 case CPU_PPCE6500:
33938 case CPU_TITAN:
33939 return 2;
33940 case CPU_PPC476:
33941 case CPU_PPC604:
33942 case CPU_PPC604E:
33943 case CPU_PPC620:
33944 case CPU_PPC630:
33945 return 4;
33946 case CPU_POWER4:
33947 case CPU_POWER5:
33948 case CPU_POWER6:
33949 case CPU_POWER7:
33950 return 5;
33951 case CPU_POWER8:
33952 return 7;
33953 case CPU_POWER9:
33954 return 6;
33955 default:
33956 return 1;
33960 /* Return how many instructions to look ahead for better insn
33961 scheduling. */
33963 static int
33964 rs6000_use_sched_lookahead (void)
33966 switch (rs6000_cpu_attr)
33968 case CPU_PPC8540:
33969 case CPU_PPC8548:
33970 return 4;
33972 case CPU_CELL:
33973 return (reload_completed ? 8 : 0);
33975 default:
33976 return 0;
33980 /* We are choosing an insn from the ready queue. Return zero if INSN can be
33981 chosen. */
33982 static int
33983 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
33985 if (ready_index == 0)
33986 return 0;
33988 if (rs6000_cpu_attr != CPU_CELL)
33989 return 0;
33991 gcc_assert (insn != NULL_RTX && INSN_P (insn));
33993 if (!reload_completed
33994 || is_nonpipeline_insn (insn)
33995 || is_microcoded_insn (insn))
33996 return 1;
33998 return 0;
34001 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
34002 and return true. */
34004 static bool
34005 find_mem_ref (rtx pat, rtx *mem_ref)
34007 const char * fmt;
34008 int i, j;
34010 /* stack_tie does not produce any real memory traffic. */
34011 if (tie_operand (pat, VOIDmode))
34012 return false;
34014 if (GET_CODE (pat) == MEM)
34016 *mem_ref = pat;
34017 return true;
34020 /* Recursively process the pattern. */
34021 fmt = GET_RTX_FORMAT (GET_CODE (pat));
34023 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
34025 if (fmt[i] == 'e')
34027 if (find_mem_ref (XEXP (pat, i), mem_ref))
34028 return true;
34030 else if (fmt[i] == 'E')
34031 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
34033 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
34034 return true;
34038 return false;
34041 /* Determine if PAT is a PATTERN of a load insn. */
34043 static bool
34044 is_load_insn1 (rtx pat, rtx *load_mem)
34046 if (!pat || pat == NULL_RTX)
34047 return false;
34049 if (GET_CODE (pat) == SET)
34050 return find_mem_ref (SET_SRC (pat), load_mem);
34052 if (GET_CODE (pat) == PARALLEL)
34054 int i;
34056 for (i = 0; i < XVECLEN (pat, 0); i++)
34057 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
34058 return true;
34061 return false;
34064 /* Determine if INSN loads from memory. */
34066 static bool
34067 is_load_insn (rtx insn, rtx *load_mem)
34069 if (!insn || !INSN_P (insn))
34070 return false;
34072 if (CALL_P (insn))
34073 return false;
34075 return is_load_insn1 (PATTERN (insn), load_mem);
34078 /* Determine if PAT is a PATTERN of a store insn. */
34080 static bool
34081 is_store_insn1 (rtx pat, rtx *str_mem)
34083 if (!pat || pat == NULL_RTX)
34084 return false;
34086 if (GET_CODE (pat) == SET)
34087 return find_mem_ref (SET_DEST (pat), str_mem);
34089 if (GET_CODE (pat) == PARALLEL)
34091 int i;
34093 for (i = 0; i < XVECLEN (pat, 0); i++)
34094 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
34095 return true;
34098 return false;
34101 /* Determine if INSN stores to memory. */
34103 static bool
34104 is_store_insn (rtx insn, rtx *str_mem)
34106 if (!insn || !INSN_P (insn))
34107 return false;
34109 return is_store_insn1 (PATTERN (insn), str_mem);
34112 /* Return whether TYPE is a Power9 pairable vector instruction type. */
34114 static bool
34115 is_power9_pairable_vec_type (enum attr_type type)
34117 switch (type)
34119 case TYPE_VECSIMPLE:
34120 case TYPE_VECCOMPLEX:
34121 case TYPE_VECDIV:
34122 case TYPE_VECCMP:
34123 case TYPE_VECPERM:
34124 case TYPE_VECFLOAT:
34125 case TYPE_VECFDIV:
34126 case TYPE_VECDOUBLE:
34127 return true;
34128 default:
34129 break;
34131 return false;
34134 /* Returns whether the dependence between INSN and NEXT is considered
34135 costly by the given target. */
34137 static bool
34138 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
34140 rtx insn;
34141 rtx next;
34142 rtx load_mem, str_mem;
34144 /* If the flag is not enabled - no dependence is considered costly;
34145 allow all dependent insns in the same group.
34146 This is the most aggressive option. */
34147 if (rs6000_sched_costly_dep == no_dep_costly)
34148 return false;
34150 /* If the flag is set to 1 - a dependence is always considered costly;
34151 do not allow dependent instructions in the same group.
34152 This is the most conservative option. */
34153 if (rs6000_sched_costly_dep == all_deps_costly)
34154 return true;
34156 insn = DEP_PRO (dep);
34157 next = DEP_CON (dep);
34159 if (rs6000_sched_costly_dep == store_to_load_dep_costly
34160 && is_load_insn (next, &load_mem)
34161 && is_store_insn (insn, &str_mem))
34162 /* Prevent load after store in the same group. */
34163 return true;
34165 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
34166 && is_load_insn (next, &load_mem)
34167 && is_store_insn (insn, &str_mem)
34168 && DEP_TYPE (dep) == REG_DEP_TRUE
34169 && mem_locations_overlap(str_mem, load_mem))
34170 /* Prevent load after store in the same group if it is a true
34171 dependence. */
34172 return true;
34174 /* The flag is set to X; dependences with latency >= X are considered costly,
34175 and will not be scheduled in the same group. */
34176 if (rs6000_sched_costly_dep <= max_dep_latency
34177 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
34178 return true;
34180 return false;
34183 /* Return the next insn after INSN that is found before TAIL is reached,
34184 skipping any "non-active" insns - insns that will not actually occupy
34185 an issue slot. Return NULL_RTX if such an insn is not found. */
34187 static rtx_insn *
34188 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
34190 if (insn == NULL_RTX || insn == tail)
34191 return NULL;
34193 while (1)
34195 insn = NEXT_INSN (insn);
34196 if (insn == NULL_RTX || insn == tail)
34197 return NULL;
34199 if (CALL_P (insn)
34200 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
34201 || (NONJUMP_INSN_P (insn)
34202 && GET_CODE (PATTERN (insn)) != USE
34203 && GET_CODE (PATTERN (insn)) != CLOBBER
34204 && INSN_CODE (insn) != CODE_FOR_stack_tie))
34205 break;
34207 return insn;
34210 /* Do Power9 specific sched_reorder2 reordering of ready list. */
34212 static int
34213 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
34215 int pos;
34216 int i;
34217 rtx_insn *tmp;
34218 enum attr_type type, type2;
34220 type = get_attr_type (last_scheduled_insn);
34222 /* Try to issue fixed point divides back-to-back in pairs so they will be
34223 routed to separate execution units and execute in parallel. */
34224 if (type == TYPE_DIV && divide_cnt == 0)
34226 /* First divide has been scheduled. */
34227 divide_cnt = 1;
34229 /* Scan the ready list looking for another divide, if found move it
34230 to the end of the list so it is chosen next. */
34231 pos = lastpos;
34232 while (pos >= 0)
34234 if (recog_memoized (ready[pos]) >= 0
34235 && get_attr_type (ready[pos]) == TYPE_DIV)
34237 tmp = ready[pos];
34238 for (i = pos; i < lastpos; i++)
34239 ready[i] = ready[i + 1];
34240 ready[lastpos] = tmp;
34241 break;
34243 pos--;
34246 else
34248 /* Last insn was the 2nd divide or not a divide, reset the counter. */
34249 divide_cnt = 0;
34251 /* The best dispatch throughput for vector and vector load insns can be
34252 achieved by interleaving a vector and vector load such that they'll
34253 dispatch to the same superslice. If this pairing cannot be achieved
34254 then it is best to pair vector insns together and vector load insns
34255 together.
34257 To aid in this pairing, vec_pairing maintains the current state with
34258 the following values:
34260 0 : Initial state, no vecload/vector pairing has been started.
34262 1 : A vecload or vector insn has been issued and a candidate for
34263 pairing has been found and moved to the end of the ready
34264 list. */
34265 if (type == TYPE_VECLOAD)
34267 /* Issued a vecload. */
34268 if (vec_pairing == 0)
34270 int vecload_pos = -1;
34271 /* We issued a single vecload, look for a vector insn to pair it
34272 with. If one isn't found, try to pair another vecload. */
34273 pos = lastpos;
34274 while (pos >= 0)
34276 if (recog_memoized (ready[pos]) >= 0)
34278 type2 = get_attr_type (ready[pos]);
34279 if (is_power9_pairable_vec_type (type2))
34281 /* Found a vector insn to pair with, move it to the
34282 end of the ready list so it is scheduled next. */
34283 tmp = ready[pos];
34284 for (i = pos; i < lastpos; i++)
34285 ready[i] = ready[i + 1];
34286 ready[lastpos] = tmp;
34287 vec_pairing = 1;
34288 return cached_can_issue_more;
34290 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
34291 /* Remember position of first vecload seen. */
34292 vecload_pos = pos;
34294 pos--;
34296 if (vecload_pos >= 0)
34298 /* Didn't find a vector to pair with but did find a vecload,
34299 move it to the end of the ready list. */
34300 tmp = ready[vecload_pos];
34301 for (i = vecload_pos; i < lastpos; i++)
34302 ready[i] = ready[i + 1];
34303 ready[lastpos] = tmp;
34304 vec_pairing = 1;
34305 return cached_can_issue_more;
34309 else if (is_power9_pairable_vec_type (type))
34311 /* Issued a vector operation. */
34312 if (vec_pairing == 0)
34314 int vec_pos = -1;
34315 /* We issued a single vector insn, look for a vecload to pair it
34316 with. If one isn't found, try to pair another vector. */
34317 pos = lastpos;
34318 while (pos >= 0)
34320 if (recog_memoized (ready[pos]) >= 0)
34322 type2 = get_attr_type (ready[pos]);
34323 if (type2 == TYPE_VECLOAD)
34325 /* Found a vecload insn to pair with, move it to the
34326 end of the ready list so it is scheduled next. */
34327 tmp = ready[pos];
34328 for (i = pos; i < lastpos; i++)
34329 ready[i] = ready[i + 1];
34330 ready[lastpos] = tmp;
34331 vec_pairing = 1;
34332 return cached_can_issue_more;
34334 else if (is_power9_pairable_vec_type (type2)
34335 && vec_pos == -1)
34336 /* Remember position of first vector insn seen. */
34337 vec_pos = pos;
34339 pos--;
34341 if (vec_pos >= 0)
34343 /* Didn't find a vecload to pair with but did find a vector
34344 insn, move it to the end of the ready list. */
34345 tmp = ready[vec_pos];
34346 for (i = vec_pos; i < lastpos; i++)
34347 ready[i] = ready[i + 1];
34348 ready[lastpos] = tmp;
34349 vec_pairing = 1;
34350 return cached_can_issue_more;
34355 /* We've either finished a vec/vecload pair, couldn't find an insn to
34356 continue the current pair, or the last insn had nothing to do with
34357 pairing. In any case, reset the state. */
34358 vec_pairing = 0;
34361 return cached_can_issue_more;
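/* Every move above uses the same idiom: rotate the chosen insn into
   READY[LASTPOS], the slot the scheduler consumes next, shifting the
   intervening entries down one position.  As a generic helper it
   would be (sketch only; the loops above are deliberately left
   expanded):  */
#if 0
static void
rotate_to_back (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif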
34364 /* We are about to begin issuing insns for this clock cycle. */
34366 static int
34367 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
34368 rtx_insn **ready ATTRIBUTE_UNUSED,
34369 int *pn_ready ATTRIBUTE_UNUSED,
34370 int clock_var ATTRIBUTE_UNUSED)
34372 int n_ready = *pn_ready;
34374 if (sched_verbose)
34375 fprintf (dump, "// rs6000_sched_reorder :\n");
34377 /* Reorder the ready list, if the next insn to be issued
34378 is a nonpipelined insn. */
34379 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
34381 if (is_nonpipeline_insn (ready[n_ready - 1])
34382 && (recog_memoized (ready[n_ready - 2]) > 0))
34383 /* Simply swap first two insns. */
34384 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
34387 if (rs6000_cpu == PROCESSOR_POWER6)
34388 load_store_pendulum = 0;
34390 return rs6000_issue_rate ();
34393 /* Like rs6000_sched_reorder, but called after issuing each insn. */
34395 static int
34396 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
34397 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
34399 if (sched_verbose)
34400 fprintf (dump, "// rs6000_sched_reorder2 :\n");
34402 /* For Power6, we need to handle some special cases to try and keep the
34403 store queue from overflowing and triggering expensive flushes.
34405 This code monitors how load and store instructions are being issued
34406 and skews the ready list one way or the other to increase the likelihood
34407 that a desired instruction is issued at the proper time.
34409 A couple of things are done. First, we maintain a "load_store_pendulum"
34410 to track the current state of load/store issue.
34412 - If the pendulum is at zero, then no loads or stores have been
34413 issued in the current cycle so we do nothing.
34415 - If the pendulum is 1, then a single load has been issued in this
34416 cycle and we attempt to locate another load in the ready list to
34417 issue with it.
34419 - If the pendulum is -2, then two stores have already been
34420 issued in this cycle, so we increase the priority of the first load
34421 in the ready list to increase its likelihood of being chosen first
34422 in the next cycle.
34424 - If the pendulum is -1, then a single store has been issued in this
34425 cycle and we attempt to locate another store in the ready list to
34426 issue with it, preferring a store to an adjacent memory location to
34427 facilitate store pairing in the store queue.
34429 - If the pendulum is 2, then two loads have already been
34430 issued in this cycle, so we increase the priority of the first store
34431 in the ready list to increase its likelihood of being chosen first
34432 in the next cycle.
34434 - If the pendulum < -2 or > 2, then do nothing.
34436 Note: This code covers the most common scenarios. There exist non
34437 load/store instructions which make use of the LSU and which
34438 would need to be accounted for to strictly model the behavior
34439 of the machine. Those instructions are currently unaccounted
34440 for to help minimize compile time overhead of this code. */
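/* Worked example within a single cycle (the pendulum is reset to 0 by
   rs6000_sched_reorder at the start of each cycle): issuing a store
   swings the pendulum to -1 and we search the ready list for a second
   store, preferring one adjacent in memory; issuing that second store
   swings it to -2 and we instead raise the priority of the first load
   found, nudging the next cycle back toward loads.  Loads mirror this
   with +1/+2.  */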
34442 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
34444 int pos;
34445 int i;
34446 rtx_insn *tmp;
34447 rtx load_mem, str_mem;
34449 if (is_store_insn (last_scheduled_insn, &str_mem))
34450 /* Issuing a store, swing the load_store_pendulum to the left */
34451 load_store_pendulum--;
34452 else if (is_load_insn (last_scheduled_insn, &load_mem))
34453 /* Issuing a load, swing the load_store_pendulum to the right */
34454 load_store_pendulum++;
34455 else
34456 return cached_can_issue_more;
34458 /* If the pendulum is balanced, or there is only one instruction on
34459 the ready list, then all is well, so return. */
34460 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
34461 return cached_can_issue_more;
34463 if (load_store_pendulum == 1)
34465 /* A load has been issued in this cycle. Scan the ready list
34466 for another load to issue with it */
34467 pos = *pn_ready-1;
34469 while (pos >= 0)
34471 if (is_load_insn (ready[pos], &load_mem))
34473 /* Found a load. Move it to the head of the ready list,
34474 and adjust its priority so that it is more likely to
34475 stay there */
34476 tmp = ready[pos];
34477 for (i=pos; i<*pn_ready-1; i++)
34478 ready[i] = ready[i + 1];
34479 ready[*pn_ready-1] = tmp;
34481 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34482 INSN_PRIORITY (tmp)++;
34483 break;
34485 pos--;
34488 else if (load_store_pendulum == -2)
34490 /* Two stores have been issued in this cycle. Increase the
34491 priority of the first load in the ready list to favor it for
34492 issuing in the next cycle. */
34493 pos = *pn_ready-1;
34495 while (pos >= 0)
34497 if (is_load_insn (ready[pos], &load_mem)
34498 && !sel_sched_p ()
34499 && INSN_PRIORITY_KNOWN (ready[pos]))
34501 INSN_PRIORITY (ready[pos])++;
34503 /* Adjust the pendulum to account for the fact that a load
34504 was found and increased in priority. This is to prevent
34505 increasing the priority of multiple loads */
34506 load_store_pendulum--;
34508 break;
34510 pos--;
34513 else if (load_store_pendulum == -1)
34515 /* A store has been issued in this cycle. Scan the ready list for
34516 another store to issue with it, preferring a store to an adjacent
34517 memory location */
34518 int first_store_pos = -1;
34520 pos = *pn_ready-1;
34522 while (pos >= 0)
34524 if (is_store_insn (ready[pos], &str_mem))
34526 rtx str_mem2;
34527 /* Maintain the index of the first store found on the
34528 list */
34529 if (first_store_pos == -1)
34530 first_store_pos = pos;
34532 if (is_store_insn (last_scheduled_insn, &str_mem2)
34533 && adjacent_mem_locations (str_mem, str_mem2))
34535 /* Found an adjacent store. Move it to the head of the
34536 ready list, and adjust its priority so that it is
34537 more likely to stay there */
34538 tmp = ready[pos];
34539 for (i=pos; i<*pn_ready-1; i++)
34540 ready[i] = ready[i + 1];
34541 ready[*pn_ready-1] = tmp;
34543 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34544 INSN_PRIORITY (tmp)++;
34546 first_store_pos = -1;
34548 break;
34551 pos--;
34554 if (first_store_pos >= 0)
34556 /* An adjacent store wasn't found, but a non-adjacent store was,
34557 so move the non-adjacent store to the front of the ready
34558 list, and adjust its priority so that it is more likely to
34559 stay there. */
34560 tmp = ready[first_store_pos];
34561 for (i=first_store_pos; i<*pn_ready-1; i++)
34562 ready[i] = ready[i + 1];
34563 ready[*pn_ready-1] = tmp;
34564 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34565 INSN_PRIORITY (tmp)++;
34568 else if (load_store_pendulum == 2)
34570 /* Two loads have been issued in this cycle. Increase the priority
34571 of the first store in the ready list to favor it for issuing in
34572 the next cycle. */
34573 pos = *pn_ready-1;
34575 while (pos >= 0)
34577 if (is_store_insn (ready[pos], &str_mem)
34578 && !sel_sched_p ()
34579 && INSN_PRIORITY_KNOWN (ready[pos]))
34581 INSN_PRIORITY (ready[pos])++;
34583 /* Adjust the pendulum to account for the fact that a store
34584 was found and increased in priority. This is to prevent
34585 increasing the priority of multiple stores */
34586 load_store_pendulum++;
34588 break;
34590 pos--;
34595 /* Do Power9 dependent reordering if necessary. */
34596 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
34597 && recog_memoized (last_scheduled_insn) >= 0)
34598 return power9_sched_reorder2 (ready, *pn_ready - 1);
34600 return cached_can_issue_more;
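
/* Illustrative sketch, not part of the build: a stand-alone model of
   the Power6 load/store pendulum implemented above.  Negative values
   count stores issued in the current cycle, positive values count
   loads; the reorder hook then tries to pair a second access of the
   same kind, or at +/-2 to favor the opposite kind for the next
   cycle.  The names below are hypothetical.  */

enum lsu_kind_sketch { LSU_OTHER, LSU_LOAD, LSU_STORE };

static const char *
pendulum_action_sketch (int *pendulum, enum lsu_kind_sketch kind)
{
  if (kind == LSU_STORE)
    (*pendulum)--;		/* swing left */
  else if (kind == LSU_LOAD)
    (*pendulum)++;		/* swing right */
  else
    return "no load/store issued: do nothing";

  switch (*pendulum)
    {
    case 0:  return "balanced: do nothing";
    case 1:  return "try to pair another load this cycle";
    case -1: return "try to pair another (preferably adjacent) store";
    case 2:  return "boost the first ready store for the next cycle";
    case -2: return "boost the first ready load for the next cycle";
    default: return "pendulum out of range: do nothing";
    }
}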
34603 /* Return whether the presence of INSN causes a dispatch group termination
34604 of group WHICH_GROUP.
34606 If WHICH_GROUP == current_group, this function will return true if INSN
34607 causes the termination of the current group (i.e., the dispatch group to
34608 which INSN belongs). This means that INSN will be the last insn in the
34609 group it belongs to.
34611 If WHICH_GROUP == previous_group, this function will return true if INSN
34612 causes the termination of the previous group (i.e., the dispatch group that
34613 precedes the group to which INSN belongs). This means that INSN will be
34614 the first insn in the group it belongs to. */
34616 static bool
34617 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
34619 bool first, last;
34621 if (! insn)
34622 return false;
34624 first = insn_must_be_first_in_group (insn);
34625 last = insn_must_be_last_in_group (insn);
34627 if (first && last)
34628 return true;
34630 if (which_group == current_group)
34631 return last;
34632 else if (which_group == previous_group)
34633 return first;
34635 return false;
34639 static bool
34640 insn_must_be_first_in_group (rtx_insn *insn)
34642 enum attr_type type;
34644 if (!insn
34645 || NOTE_P (insn)
34646 || DEBUG_INSN_P (insn)
34647 || GET_CODE (PATTERN (insn)) == USE
34648 || GET_CODE (PATTERN (insn)) == CLOBBER)
34649 return false;
34651 switch (rs6000_cpu)
34653 case PROCESSOR_POWER5:
34654 if (is_cracked_insn (insn))
34655 return true;
34656 /* FALLTHRU */
34657 case PROCESSOR_POWER4:
34658 if (is_microcoded_insn (insn))
34659 return true;
34661 if (!rs6000_sched_groups)
34662 return false;
34664 type = get_attr_type (insn);
34666 switch (type)
34668 case TYPE_MFCR:
34669 case TYPE_MFCRF:
34670 case TYPE_MTCR:
34671 case TYPE_DELAYED_CR:
34672 case TYPE_CR_LOGICAL:
34673 case TYPE_MTJMPR:
34674 case TYPE_MFJMPR:
34675 case TYPE_DIV:
34676 case TYPE_LOAD_L:
34677 case TYPE_STORE_C:
34678 case TYPE_ISYNC:
34679 case TYPE_SYNC:
34680 return true;
34681 default:
34682 break;
34684 break;
34685 case PROCESSOR_POWER6:
34686 type = get_attr_type (insn);
34688 switch (type)
34690 case TYPE_EXTS:
34691 case TYPE_CNTLZ:
34692 case TYPE_TRAP:
34693 case TYPE_MUL:
34694 case TYPE_INSERT:
34695 case TYPE_FPCOMPARE:
34696 case TYPE_MFCR:
34697 case TYPE_MTCR:
34698 case TYPE_MFJMPR:
34699 case TYPE_MTJMPR:
34700 case TYPE_ISYNC:
34701 case TYPE_SYNC:
34702 case TYPE_LOAD_L:
34703 case TYPE_STORE_C:
34704 return true;
34705 case TYPE_SHIFT:
34706 if (get_attr_dot (insn) == DOT_NO
34707 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
34708 return true;
34709 else
34710 break;
34711 case TYPE_DIV:
34712 if (get_attr_size (insn) == SIZE_32)
34713 return true;
34714 else
34715 break;
34716 case TYPE_LOAD:
34717 case TYPE_STORE:
34718 case TYPE_FPLOAD:
34719 case TYPE_FPSTORE:
34720 if (get_attr_update (insn) == UPDATE_YES)
34721 return true;
34722 else
34723 break;
34724 default:
34725 break;
34727 break;
34728 case PROCESSOR_POWER7:
34729 type = get_attr_type (insn);
34731 switch (type)
34733 case TYPE_CR_LOGICAL:
34734 case TYPE_MFCR:
34735 case TYPE_MFCRF:
34736 case TYPE_MTCR:
34737 case TYPE_DIV:
34738 case TYPE_ISYNC:
34739 case TYPE_LOAD_L:
34740 case TYPE_STORE_C:
34741 case TYPE_MFJMPR:
34742 case TYPE_MTJMPR:
34743 return true;
34744 case TYPE_MUL:
34745 case TYPE_SHIFT:
34746 case TYPE_EXTS:
34747 if (get_attr_dot (insn) == DOT_YES)
34748 return true;
34749 else
34750 break;
34751 case TYPE_LOAD:
34752 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34753 || get_attr_update (insn) == UPDATE_YES)
34754 return true;
34755 else
34756 break;
34757 case TYPE_STORE:
34758 case TYPE_FPLOAD:
34759 case TYPE_FPSTORE:
34760 if (get_attr_update (insn) == UPDATE_YES)
34761 return true;
34762 else
34763 break;
34764 default:
34765 break;
34767 break;
34768 case PROCESSOR_POWER8:
34769 type = get_attr_type (insn);
34771 switch (type)
34773 case TYPE_CR_LOGICAL:
34774 case TYPE_DELAYED_CR:
34775 case TYPE_MFCR:
34776 case TYPE_MFCRF:
34777 case TYPE_MTCR:
34778 case TYPE_SYNC:
34779 case TYPE_ISYNC:
34780 case TYPE_LOAD_L:
34781 case TYPE_STORE_C:
34782 case TYPE_VECSTORE:
34783 case TYPE_MFJMPR:
34784 case TYPE_MTJMPR:
34785 return true;
34786 case TYPE_SHIFT:
34787 case TYPE_EXTS:
34788 case TYPE_MUL:
34789 if (get_attr_dot (insn) == DOT_YES)
34790 return true;
34791 else
34792 break;
34793 case TYPE_LOAD:
34794 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34795 || get_attr_update (insn) == UPDATE_YES)
34796 return true;
34797 else
34798 break;
34799 case TYPE_STORE:
34800 if (get_attr_update (insn) == UPDATE_YES
34801 && get_attr_indexed (insn) == INDEXED_YES)
34802 return true;
34803 else
34804 break;
34805 default:
34806 break;
34808 break;
34809 default:
34810 break;
34813 return false;
34816 static bool
34817 insn_must_be_last_in_group (rtx_insn *insn)
34819 enum attr_type type;
34821 if (!insn
34822 || NOTE_P (insn)
34823 || DEBUG_INSN_P (insn)
34824 || GET_CODE (PATTERN (insn)) == USE
34825 || GET_CODE (PATTERN (insn)) == CLOBBER)
34826 return false;
34828 switch (rs6000_cpu) {
34829 case PROCESSOR_POWER4:
34830 case PROCESSOR_POWER5:
34831 if (is_microcoded_insn (insn))
34832 return true;
34834 if (is_branch_slot_insn (insn))
34835 return true;
34837 break;
34838 case PROCESSOR_POWER6:
34839 type = get_attr_type (insn);
34841 switch (type)
34843 case TYPE_EXTS:
34844 case TYPE_CNTLZ:
34845 case TYPE_TRAP:
34846 case TYPE_MUL:
34847 case TYPE_FPCOMPARE:
34848 case TYPE_MFCR:
34849 case TYPE_MTCR:
34850 case TYPE_MFJMPR:
34851 case TYPE_MTJMPR:
34852 case TYPE_ISYNC:
34853 case TYPE_SYNC:
34854 case TYPE_LOAD_L:
34855 case TYPE_STORE_C:
34856 return true;
34857 case TYPE_SHIFT:
34858 if (get_attr_dot (insn) == DOT_NO
34859 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
34860 return true;
34861 else
34862 break;
34863 case TYPE_DIV:
34864 if (get_attr_size (insn) == SIZE_32)
34865 return true;
34866 else
34867 break;
34868 default:
34869 break;
34871 break;
34872 case PROCESSOR_POWER7:
34873 type = get_attr_type (insn);
34875 switch (type)
34877 case TYPE_ISYNC:
34878 case TYPE_SYNC:
34879 case TYPE_LOAD_L:
34880 case TYPE_STORE_C:
34881 return true;
34882 case TYPE_LOAD:
34883 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34884 && get_attr_update (insn) == UPDATE_YES)
34885 return true;
34886 else
34887 break;
34888 case TYPE_STORE:
34889 if (get_attr_update (insn) == UPDATE_YES
34890 && get_attr_indexed (insn) == INDEXED_YES)
34891 return true;
34892 else
34893 break;
34894 default:
34895 break;
34897 break;
34898 case PROCESSOR_POWER8:
34899 type = get_attr_type (insn);
34901 switch (type)
34903 case TYPE_MFCR:
34904 case TYPE_MTCR:
34905 case TYPE_ISYNC:
34906 case TYPE_SYNC:
34907 case TYPE_LOAD_L:
34908 case TYPE_STORE_C:
34909 return true;
34910 case TYPE_LOAD:
34911 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34912 && get_attr_update (insn) == UPDATE_YES)
34913 return true;
34914 else
34915 break;
34916 case TYPE_STORE:
34917 if (get_attr_update (insn) == UPDATE_YES
34918 && get_attr_indexed (insn) == INDEXED_YES)
34919 return true;
34920 else
34921 break;
34922 default:
34923 break;
34925 break;
34926 default:
34927 break;
34930 return false;
34933 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
34934 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
34936 static bool
34937 is_costly_group (rtx *group_insns, rtx next_insn)
34939 int i;
34940 int issue_rate = rs6000_issue_rate ();
34942 for (i = 0; i < issue_rate; i++)
34944 sd_iterator_def sd_it;
34945 dep_t dep;
34946 rtx insn = group_insns[i];
34948 if (!insn)
34949 continue;
34951 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
34953 rtx next = DEP_CON (dep);
34955 if (next == next_insn
34956 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
34957 return true;
34961 return false;
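
/* Illustrative sketch, not part of the build: the shape of the scan in
   is_costly_group, with GCC's forward dependence lists replaced by a
   plain array.  A group is "costly" for NEXT_ID if any recorded
   dependence reaches NEXT_ID and is itself costly.  The types and
   names here are hypothetical.  */

struct dep_sketch
{
  int consumer_id;	/* insn consuming the result */
  int costly_p;		/* rs6000_is_costly_dependence said true */
};

static int
group_is_costly_sketch (const struct dep_sketch *deps, int n_deps,
			int next_id)
{
  int i;
  for (i = 0; i < n_deps; i++)
    if (deps[i].consumer_id == next_id && deps[i].costly_p)
      return 1;
  return 0;
}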
34964 /* Utility of the function redefine_groups.
34965 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
34966 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
34967 to keep it "far" (in a separate group) from GROUP_INSNS, following
34968 one of the following schemes, depending on the value of the flag
34969 -minsert-sched-nops = X:
34970 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
34971 in order to force NEXT_INSN into a separate group.
34972 (2) X < sched_finish_regroup_exact: insert exactly X nops.
34973 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
34974 insertion (has a group just ended, how many vacant issue slots remain in the
34975 last group, and how many dispatch groups were encountered so far). */
34977 static int
34978 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
34979 rtx_insn *next_insn, bool *group_end, int can_issue_more,
34980 int *group_count)
34982 rtx nop;
34983 bool force;
34984 int issue_rate = rs6000_issue_rate ();
34985 bool end = *group_end;
34986 int i;
34988 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
34989 return can_issue_more;
34991 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
34992 return can_issue_more;
34994 force = is_costly_group (group_insns, next_insn);
34995 if (!force)
34996 return can_issue_more;
34998 if (sched_verbose > 6)
34999 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
35000 *group_count ,can_issue_more);
35002 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
35004 if (*group_end)
35005 can_issue_more = 0;
35007 /* Since only a branch can be issued in the last issue_slot, it is
35008 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
35009 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
35010 in this case the last nop will start a new group and the branch
35011 will be forced to the new group. */
35012 if (can_issue_more && !is_branch_slot_insn (next_insn))
35013 can_issue_more--;
35015 /* Do we have a special group ending nop? */
35016 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
35017 || rs6000_cpu_attr == CPU_POWER8)
35019 nop = gen_group_ending_nop ();
35020 emit_insn_before (nop, next_insn);
35021 can_issue_more = 0;
35023 else
35024 while (can_issue_more > 0)
35026 nop = gen_nop ();
35027 emit_insn_before (nop, next_insn);
35028 can_issue_more--;
35031 *group_end = true;
35032 return 0;
35035 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
35037 int n_nops = rs6000_sched_insert_nops;
35039 /* Nops can't be issued from the branch slot, so the effective
35040 issue_rate for nops is 'issue_rate - 1'. */
35041 if (can_issue_more == 0)
35042 can_issue_more = issue_rate;
35043 can_issue_more--;
35044 if (can_issue_more == 0)
35046 can_issue_more = issue_rate - 1;
35047 (*group_count)++;
35048 end = true;
35049 for (i = 0; i < issue_rate; i++)
35051 group_insns[i] = 0;
35055 while (n_nops > 0)
35057 nop = gen_nop ();
35058 emit_insn_before (nop, next_insn);
35059 if (can_issue_more == issue_rate - 1) /* new group begins */
35060 end = false;
35061 can_issue_more--;
35062 if (can_issue_more == 0)
35064 can_issue_more = issue_rate - 1;
35065 (*group_count)++;
35066 end = true;
35067 for (i = 0; i < issue_rate; i++)
35069 group_insns[i] = 0;
35072 n_nops--;
35075 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
35076 can_issue_more++;
35078 /* Is next_insn going to start a new group? */
35079 *group_end
35080 = (end
35081 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
35082 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
35083 || (can_issue_more < issue_rate &&
35084 insn_terminates_group_p (next_insn, previous_group)));
35085 if (*group_end && end)
35086 (*group_count)--;
35088 if (sched_verbose > 6)
35089 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
35090 *group_count, can_issue_more);
35091 return can_issue_more;
35094 return can_issue_more;
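
/* Illustrative sketch, not part of the build: the fixed-count nop
   accounting used above when -minsert-sched-nops asks for exactly
   N nops.  Nops never occupy the branch slot, so they drain
   'issue_rate - 1' slots per group.  This sketch tracks only the
   number of groups the nops complete, not the group_end flag.  */

static int
groups_closed_by_nops_sketch (int n_nops, int issue_rate,
			      int *can_issue_more)
{
  int closed = 0;

  /* Step past the branch slot, exactly as the code above does.  */
  if (*can_issue_more == 0)
    *can_issue_more = issue_rate;
  (*can_issue_more)--;
  if (*can_issue_more == 0)
    {
      *can_issue_more = issue_rate - 1;
      closed++;
    }

  while (n_nops-- > 0)
    {
      (*can_issue_more)--;
      if (*can_issue_more == 0)
	{
	  *can_issue_more = issue_rate - 1;
	  closed++;
	}
    }

  /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
  (*can_issue_more)++;
  return closed;
}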
35097 /* This function tries to synch the dispatch groups that the compiler "sees"
35098 with the dispatch groups that the processor dispatcher is expected to
35099 form in practice. It tries to achieve this synchronization by forcing the
35100 estimated processor grouping on the compiler (as opposed to the function
35101 'pad_groups' which tries to force the scheduler's grouping on the processor).
35103 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
35104 examines the (estimated) dispatch groups that will be formed by the processor
35105 dispatcher. It marks these group boundaries to reflect the estimated
35106 processor grouping, overriding the grouping that the scheduler had marked.
35107 Depending on the value of the flag '-minsert-sched-nops' this function can
35108 force certain insns into separate groups or force a certain distance between
35109 them by inserting nops, for example, if there exists a "costly dependence"
35110 between the insns.
35112 The function estimates the group boundaries that the processor will form as
35113 follows: It keeps track of how many vacant issue slots are available after
35114 each insn. A subsequent insn will start a new group if one of the following
35115 4 cases applies:
35116 - no more vacant issue slots remain in the current dispatch group.
35117 - only the last issue slot, which is the branch slot, is vacant, but the next
35118 insn is not a branch.
35119 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
35120 which means that a cracked insn (which occupies two issue slots) can't be
35121 issued in this group.
35122 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
35123 start a new group. */
35125 static int
35126 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
35127 rtx_insn *tail)
35129 rtx_insn *insn, *next_insn;
35130 int issue_rate;
35131 int can_issue_more;
35132 int slot, i;
35133 bool group_end;
35134 int group_count = 0;
35135 rtx *group_insns;
35137 /* Initialize. */
35138 issue_rate = rs6000_issue_rate ();
35139 group_insns = XALLOCAVEC (rtx, issue_rate);
35140 for (i = 0; i < issue_rate; i++)
35142 group_insns[i] = 0;
35144 can_issue_more = issue_rate;
35145 slot = 0;
35146 insn = get_next_active_insn (prev_head_insn, tail);
35147 group_end = false;
35149 while (insn != NULL_RTX)
35151 slot = (issue_rate - can_issue_more);
35152 group_insns[slot] = insn;
35153 can_issue_more =
35154 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
35155 if (insn_terminates_group_p (insn, current_group))
35156 can_issue_more = 0;
35158 next_insn = get_next_active_insn (insn, tail);
35159 if (next_insn == NULL_RTX)
35160 return group_count + 1;
35162 /* Is next_insn going to start a new group? */
35163 group_end
35164 = (can_issue_more == 0
35165 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
35166 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
35167 || (can_issue_more < issue_rate &&
35168 insn_terminates_group_p (next_insn, previous_group)));
35170 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
35171 next_insn, &group_end, can_issue_more,
35172 &group_count);
35174 if (group_end)
35176 group_count++;
35177 can_issue_more = 0;
35178 for (i = 0; i < issue_rate; i++)
35180 group_insns[i] = 0;
35184 if (GET_MODE (next_insn) == TImode && can_issue_more)
35185 PUT_MODE (next_insn, VOIDmode);
35186 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
35187 PUT_MODE (next_insn, TImode);
35189 insn = next_insn;
35190 if (can_issue_more == 0)
35191 can_issue_more = issue_rate;
35192 } /* while */
35194 return group_count;
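
/* Illustrative sketch, not part of the build: the four group-boundary
   conditions from the comment before redefine_groups, folded into a
   single stand-alone predicate.  The three flags stand in for
   is_branch_slot_insn, is_cracked_insn and
   insn_terminates_group_p (insn, previous_group).  */

static int
starts_new_group_sketch (int can_issue_more, int issue_rate,
			 int is_branch, int is_cracked,
			 int must_be_first)
{
  return (can_issue_more == 0			   /* no vacant slots */
	  || (can_issue_more == 1 && !is_branch)   /* only branch slot */
	  || (can_issue_more <= 2 && is_cracked)   /* cracked needs 2  */
	  || (can_issue_more < issue_rate && must_be_first));
}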
35197 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
35198 dispatch group boundaries that the scheduler had marked. Pad with nops
35199 any dispatch groups which have vacant issue slots, in order to force the
35200 scheduler's grouping on the processor dispatcher. The function
35201 returns the number of dispatch groups found. */
35203 static int
35204 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
35205 rtx_insn *tail)
35207 rtx_insn *insn, *next_insn;
35208 rtx nop;
35209 int issue_rate;
35210 int can_issue_more;
35211 int group_end;
35212 int group_count = 0;
35214 /* Initialize issue_rate. */
35215 issue_rate = rs6000_issue_rate ();
35216 can_issue_more = issue_rate;
35218 insn = get_next_active_insn (prev_head_insn, tail);
35219 next_insn = get_next_active_insn (insn, tail);
35221 while (insn != NULL_RTX)
35223 can_issue_more =
35224 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
35226 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
35228 if (next_insn == NULL_RTX)
35229 break;
35231 if (group_end)
35233 /* If the scheduler had marked group termination at this location
35234 (between insn and next_insn), and neither insn nor next_insn will
35235 force group termination, pad the group with nops to force group
35236 termination. */
35237 if (can_issue_more
35238 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
35239 && !insn_terminates_group_p (insn, current_group)
35240 && !insn_terminates_group_p (next_insn, previous_group))
35242 if (!is_branch_slot_insn (next_insn))
35243 can_issue_more--;
35245 while (can_issue_more)
35247 nop = gen_nop ();
35248 emit_insn_before (nop, next_insn);
35249 can_issue_more--;
35253 can_issue_more = issue_rate;
35254 group_count++;
35257 insn = next_insn;
35258 next_insn = get_next_active_insn (insn, tail);
35261 return group_count;
35264 /* We're beginning a new block. Initialize data structures as necessary. */
35266 static void
35267 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
35268 int sched_verbose ATTRIBUTE_UNUSED,
35269 int max_ready ATTRIBUTE_UNUSED)
35271 last_scheduled_insn = NULL;
35272 load_store_pendulum = 0;
35273 divide_cnt = 0;
35274 vec_pairing = 0;
35277 /* The following function is called at the end of scheduling BB.
35278 After reload, it inserts nops to enforce insn group bundling.
35280 static void
35281 rs6000_sched_finish (FILE *dump, int sched_verbose)
35283 int n_groups;
35285 if (sched_verbose)
35286 fprintf (dump, "=== Finishing schedule.\n");
35288 if (reload_completed && rs6000_sched_groups)
35290 /* Do not run sched_finish hook when selective scheduling enabled. */
35291 if (sel_sched_p ())
35292 return;
35294 if (rs6000_sched_insert_nops == sched_finish_none)
35295 return;
35297 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
35298 n_groups = pad_groups (dump, sched_verbose,
35299 current_sched_info->prev_head,
35300 current_sched_info->next_tail);
35301 else
35302 n_groups = redefine_groups (dump, sched_verbose,
35303 current_sched_info->prev_head,
35304 current_sched_info->next_tail);
35306 if (sched_verbose >= 6)
35308 fprintf (dump, "ngroups = %d\n", n_groups);
35309 print_rtl (dump, current_sched_info->prev_head);
35310 fprintf (dump, "Done finish_sched\n");
35315 struct rs6000_sched_context
35317 short cached_can_issue_more;
35318 rtx_insn *last_scheduled_insn;
35319 int load_store_pendulum;
35320 int divide_cnt;
35321 int vec_pairing;
35324 typedef struct rs6000_sched_context rs6000_sched_context_def;
35325 typedef rs6000_sched_context_def *rs6000_sched_context_t;
35327 /* Allocate store for new scheduling context. */
35328 static void *
35329 rs6000_alloc_sched_context (void)
35331 return xmalloc (sizeof (rs6000_sched_context_def));
35334 /* If CLEAN_P is true then initializes _SC with clean data,
35335 and from the global context otherwise. */
35336 static void
35337 rs6000_init_sched_context (void *_sc, bool clean_p)
35339 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
35341 if (clean_p)
35343 sc->cached_can_issue_more = 0;
35344 sc->last_scheduled_insn = NULL;
35345 sc->load_store_pendulum = 0;
35346 sc->divide_cnt = 0;
35347 sc->vec_pairing = 0;
35349 else
35351 sc->cached_can_issue_more = cached_can_issue_more;
35352 sc->last_scheduled_insn = last_scheduled_insn;
35353 sc->load_store_pendulum = load_store_pendulum;
35354 sc->divide_cnt = divide_cnt;
35355 sc->vec_pairing = vec_pairing;
35359 /* Sets the global scheduling context to the one pointed to by _SC. */
35360 static void
35361 rs6000_set_sched_context (void *_sc)
35363 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
35365 gcc_assert (sc != NULL);
35367 cached_can_issue_more = sc->cached_can_issue_more;
35368 last_scheduled_insn = sc->last_scheduled_insn;
35369 load_store_pendulum = sc->load_store_pendulum;
35370 divide_cnt = sc->divide_cnt;
35371 vec_pairing = sc->vec_pairing;
35374 /* Free _SC. */
35375 static void
35376 rs6000_free_sched_context (void *_sc)
35378 gcc_assert (_sc != NULL);
35380 free (_sc);
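
/* Illustrative sketch, not part of the build: the life cycle the
   selective scheduler drives through the four hooks above.  The real
   caller is the scheduler itself; this function only shows the
   expected ordering.  */

static void
sched_context_round_trip_sketch (void)
{
  void *ctx = rs6000_alloc_sched_context ();

  /* Snapshot the current global scheduling state into CTX.  */
  rs6000_init_sched_context (ctx, false);

  /* ... the scheduler works on another region, clobbering the
     globals, then restores the snapshot and releases it ...  */
  rs6000_set_sched_context (ctx);
  rs6000_free_sched_context (ctx);
}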
35383 static bool
35384 rs6000_sched_can_speculate_insn (rtx_insn *insn)
35386 switch (get_attr_type (insn))
35388 case TYPE_DIV:
35389 case TYPE_SDIV:
35390 case TYPE_DDIV:
35391 case TYPE_VECDIV:
35392 case TYPE_SSQRT:
35393 case TYPE_DSQRT:
35394 return false;
35396 default:
35397 return true;
35401 /* Length in units of the trampoline for entering a nested function. */
35404 rs6000_trampoline_size (void)
35406 int ret = 0;
35408 switch (DEFAULT_ABI)
35410 default:
35411 gcc_unreachable ();
35413 case ABI_AIX:
35414 ret = (TARGET_32BIT) ? 12 : 24;
35415 break;
35417 case ABI_ELFv2:
35418 gcc_assert (!TARGET_32BIT);
35419 ret = 32;
35420 break;
35422 case ABI_DARWIN:
35423 case ABI_V4:
35424 ret = (TARGET_32BIT) ? 40 : 48;
35425 break;
35428 return ret;
35431 /* Emit RTL insns to initialize the variable parts of a trampoline.
35432 FNADDR is an RTX for the address of the function's pure code.
35433 CXT is an RTX for the static chain value for the function. */
35435 static void
35436 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
35438 int regsize = (TARGET_32BIT) ? 4 : 8;
35439 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
35440 rtx ctx_reg = force_reg (Pmode, cxt);
35441 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
35443 switch (DEFAULT_ABI)
35445 default:
35446 gcc_unreachable ();
35448 /* Under AIX, just build the 3 word function descriptor */
35449 case ABI_AIX:
35451 rtx fnmem, fn_reg, toc_reg;
35453 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
35454 error ("You cannot take the address of a nested function if you use "
35455 "the -mno-pointers-to-nested-functions option.");
35457 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
35458 fn_reg = gen_reg_rtx (Pmode);
35459 toc_reg = gen_reg_rtx (Pmode);
35461 /* Macro to shorten the code expansions below. */
35462 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
35464 m_tramp = replace_equiv_address (m_tramp, addr);
35466 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
35467 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
35468 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
35469 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
35470 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
35472 # undef MEM_PLUS
35474 break;
35476 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
35477 case ABI_ELFv2:
35478 case ABI_DARWIN:
35479 case ABI_V4:
35480 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
35481 LCT_NORMAL, VOIDmode, 4,
35482 addr, Pmode,
35483 GEN_INT (rs6000_trampoline_size ()), SImode,
35484 fnaddr, Pmode,
35485 ctx_reg, Pmode);
35486 break;
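
/* Illustrative sketch, not part of the build: what the ABI_AIX case
   above builds, in plain C.  An AIX function descriptor is three
   pointer-sized words; the trampoline copies the target's entry and
   TOC words and substitutes its own static chain.  The struct and
   function names here are hypothetical.  */

struct aix_fdesc_sketch
{
  void *entry;		/* code address, word 0 */
  void *toc;		/* TOC pointer,  word 1 */
  void *static_chain;	/* context,      word 2 */
};

static void
aix_trampoline_init_sketch (struct aix_fdesc_sketch *tramp,
			    const struct aix_fdesc_sketch *target,
			    void *chain)
{
  tramp->entry = target->entry;
  tramp->toc = target->toc;
  tramp->static_chain = chain;
}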
35491 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
35492 identifier as an argument, so the front end shouldn't look it up. */
35494 static bool
35495 rs6000_attribute_takes_identifier_p (const_tree attr_id)
35497 return is_attribute_p ("altivec", attr_id);
35500 /* Handle the "altivec" attribute. The attribute may have
35501 arguments as follows:
35503 __attribute__((altivec(vector__)))
35504 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
35505 __attribute__((altivec(bool__))) (always followed by 'unsigned')
35507 and may appear more than once (e.g., 'vector bool char') in a
35508 given declaration. */
35510 static tree
35511 rs6000_handle_altivec_attribute (tree *node,
35512 tree name ATTRIBUTE_UNUSED,
35513 tree args,
35514 int flags ATTRIBUTE_UNUSED,
35515 bool *no_add_attrs)
35517 tree type = *node, result = NULL_TREE;
35518 machine_mode mode;
35519 int unsigned_p;
35520 char altivec_type
35521 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
35522 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
35523 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
35524 : '?');
35526 while (POINTER_TYPE_P (type)
35527 || TREE_CODE (type) == FUNCTION_TYPE
35528 || TREE_CODE (type) == METHOD_TYPE
35529 || TREE_CODE (type) == ARRAY_TYPE)
35530 type = TREE_TYPE (type);
35532 mode = TYPE_MODE (type);
35534 /* Check for invalid AltiVec type qualifiers. */
35535 if (type == long_double_type_node)
35536 error ("use of %<long double%> in AltiVec types is invalid");
35537 else if (type == boolean_type_node)
35538 error ("use of boolean types in AltiVec types is invalid");
35539 else if (TREE_CODE (type) == COMPLEX_TYPE)
35540 error ("use of %<complex%> in AltiVec types is invalid");
35541 else if (DECIMAL_FLOAT_MODE_P (mode))
35542 error ("use of decimal floating point types in AltiVec types is invalid");
35543 else if (!TARGET_VSX)
35545 if (type == long_unsigned_type_node || type == long_integer_type_node)
35547 if (TARGET_64BIT)
35548 error ("use of %<long%> in AltiVec types is invalid for "
35549 "64-bit code without -mvsx");
35550 else if (rs6000_warn_altivec_long)
35551 warning (0, "use of %<long%> in AltiVec types is deprecated; "
35552 "use %<int%>");
35554 else if (type == long_long_unsigned_type_node
35555 || type == long_long_integer_type_node)
35556 error ("use of %<long long%> in AltiVec types is invalid without "
35557 "-mvsx");
35558 else if (type == double_type_node)
35559 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
35562 switch (altivec_type)
35564 case 'v':
35565 unsigned_p = TYPE_UNSIGNED (type);
35566 switch (mode)
35568 case TImode:
35569 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
35570 break;
35571 case DImode:
35572 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
35573 break;
35574 case SImode:
35575 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
35576 break;
35577 case HImode:
35578 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
35579 break;
35580 case QImode:
35581 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
35582 break;
35583 case SFmode: result = V4SF_type_node; break;
35584 case DFmode: result = V2DF_type_node; break;
35585 /* If the user says 'vector int bool', we may be handed the 'bool'
35586 attribute _before_ the 'vector' attribute, and so select the
35587 proper type in the 'b' case below. */
35588 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
35589 case V2DImode: case V2DFmode:
35590 result = type;
35591 default: break;
35593 break;
35594 case 'b':
35595 switch (mode)
35597 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
35598 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
35599 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
35600 case QImode: case V16QImode: result = bool_V16QI_type_node;
35601 default: break;
35603 break;
35604 case 'p':
35605 switch (mode)
35607 case V8HImode: result = pixel_V8HI_type_node;
35608 default: break;
35610 default: break;
35613 /* Propagate qualifiers attached to the element type
35614 onto the vector type. */
35615 if (result && result != type && TYPE_QUALS (type))
35616 result = build_qualified_type (result, TYPE_QUALS (type));
35618 *no_add_attrs = true; /* No need to hang on to the attribute. */
35620 if (result)
35621 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
35623 return NULL_TREE;
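
/* Illustrative sketch, not part of this file's build: target-side
   declarations spelling out the raw attribute forms documented above
   (the context-sensitive 'vector', 'bool' and 'pixel' keywords expand
   to these).  Guarded so they only mean anything when compiling for
   an AltiVec target; the variable names are hypothetical.  */

#ifdef __ALTIVEC__
__attribute__ ((altivec (vector__))) signed int vsi_sketch;	/* V4SI */
__attribute__ ((altivec (vector__))) float vf_sketch;		/* V4SF */
__attribute__ ((altivec (vector__)))
__attribute__ ((altivec (bool__))) unsigned int vbi_sketch;	/* bool V4SI */
__attribute__ ((altivec (vector__)))
__attribute__ ((altivec (pixel__))) unsigned short vp_sketch;	/* pixel V8HI */
#endif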
35626 /* AltiVec defines four built-in scalar types that serve as vector
35627 elements; we must teach the compiler how to mangle them. */
35629 static const char *
35630 rs6000_mangle_type (const_tree type)
35632 type = TYPE_MAIN_VARIANT (type);
35634 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
35635 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
35636 return NULL;
35638 if (type == bool_char_type_node) return "U6__boolc";
35639 if (type == bool_short_type_node) return "U6__bools";
35640 if (type == pixel_type_node) return "u7__pixel";
35641 if (type == bool_int_type_node) return "U6__booli";
35642 if (type == bool_long_type_node) return "U6__booll";
35644 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
35645 "g" for IBM extended double, no matter whether it is long double (using
35646 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
35647 if (TARGET_FLOAT128_TYPE)
35649 if (type == ieee128_float_type_node)
35650 return "U10__float128";
35652 if (type == ibm128_float_type_node)
35653 return "g";
35655 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
35656 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
35659 /* Mangle IBM extended float long double as `g' (__float128) on
35660 powerpc*-linux where long-double-64 previously was the default. */
35661 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
35662 && TARGET_ELF
35663 && TARGET_LONG_DOUBLE_128
35664 && !TARGET_IEEEQUAD)
35665 return "g";
35667 /* For all other types, use normal C++ mangling. */
35668 return NULL;
35671 /* Handle a "longcall" or "shortcall" attribute; arguments as in
35672 struct attribute_spec.handler. */
35674 static tree
35675 rs6000_handle_longcall_attribute (tree *node, tree name,
35676 tree args ATTRIBUTE_UNUSED,
35677 int flags ATTRIBUTE_UNUSED,
35678 bool *no_add_attrs)
35680 if (TREE_CODE (*node) != FUNCTION_TYPE
35681 && TREE_CODE (*node) != FIELD_DECL
35682 && TREE_CODE (*node) != TYPE_DECL)
35684 warning (OPT_Wattributes, "%qE attribute only applies to functions",
35685 name);
35686 *no_add_attrs = true;
35689 return NULL_TREE;
35692 /* Set longcall attributes on all functions declared when
35693 rs6000_default_long_calls is true. */
35694 static void
35695 rs6000_set_default_type_attributes (tree type)
35697 if (rs6000_default_long_calls
35698 && (TREE_CODE (type) == FUNCTION_TYPE
35699 || TREE_CODE (type) == METHOD_TYPE))
35700 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
35701 NULL_TREE,
35702 TYPE_ATTRIBUTES (type));
35704 #if TARGET_MACHO
35705 darwin_set_default_type_attributes (type);
35706 #endif
35709 /* Return a reference suitable for calling a function with the
35710 longcall attribute. */
35713 rs6000_longcall_ref (rtx call_ref)
35715 const char *call_name;
35716 tree node;
35718 if (GET_CODE (call_ref) != SYMBOL_REF)
35719 return call_ref;
35721 /* System V adds '.' to the internal name, so skip them. */
35722 call_name = XSTR (call_ref, 0);
35723 if (*call_name == '.')
35725 while (*call_name == '.')
35726 call_name++;
35728 node = get_identifier (call_name);
35729 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
35732 return force_reg (Pmode, call_ref);
35735 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
35736 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
35737 #endif
35739 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
35740 struct attribute_spec.handler. */
35741 static tree
35742 rs6000_handle_struct_attribute (tree *node, tree name,
35743 tree args ATTRIBUTE_UNUSED,
35744 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
35746 tree *type = NULL;
35747 if (DECL_P (*node))
35749 if (TREE_CODE (*node) == TYPE_DECL)
35750 type = &TREE_TYPE (*node);
35752 else
35753 type = node;
35755 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
35756 || TREE_CODE (*type) == UNION_TYPE)))
35758 warning (OPT_Wattributes, "%qE attribute ignored", name);
35759 *no_add_attrs = true;
35762 else if ((is_attribute_p ("ms_struct", name)
35763 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
35764 || ((is_attribute_p ("gcc_struct", name)
35765 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
35767 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
35768 name);
35769 *no_add_attrs = true;
35772 return NULL_TREE;
35775 static bool
35776 rs6000_ms_bitfield_layout_p (const_tree record_type)
35778 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
35779 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
35780 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
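
/* Illustrative sketch, not part of the build: the two attributes
   handled above applied to otherwise identical structs.  Under MS
   layout a bitfield whose declared type differs from its
   predecessor's starts a new storage unit, so the two layouts (and
   sizeofs) can differ; exact sizes depend on the target and are not
   asserted here.  */

struct __attribute__ ((ms_struct)) ms_bits_sketch
{
  char a : 4;
  short b : 4;	/* starts a new short-sized unit under MS rules */
};

struct __attribute__ ((gcc_struct)) gcc_bits_sketch
{
  char a : 4;
  short b : 4;	/* may share storage with 'a' under GCC rules */
};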
35783 #ifdef USING_ELFOS_H
35785 /* A get_unnamed_section callback, used for switching to toc_section. */
35787 static void
35788 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
35790 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
35791 && TARGET_MINIMAL_TOC)
35793 if (!toc_initialized)
35795 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
35796 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35797 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
35798 fprintf (asm_out_file, "\t.tc ");
35799 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
35800 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35801 fprintf (asm_out_file, "\n");
35803 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35804 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35805 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35806 fprintf (asm_out_file, " = .+32768\n");
35807 toc_initialized = 1;
35809 else
35810 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35812 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
35814 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
35815 if (!toc_initialized)
35817 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35818 toc_initialized = 1;
35821 else
35823 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35824 if (!toc_initialized)
35826 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35827 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35828 fprintf (asm_out_file, " = .+32768\n");
35829 toc_initialized = 1;
35834 /* Implement TARGET_ASM_INIT_SECTIONS. */
35836 static void
35837 rs6000_elf_asm_init_sections (void)
35839 toc_section
35840 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
35842 sdata2_section
35843 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
35844 SDATA2_SECTION_ASM_OP);
35847 /* Implement TARGET_SELECT_RTX_SECTION. */
35849 static section *
35850 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
35851 unsigned HOST_WIDE_INT align)
35853 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
35854 return toc_section;
35855 else
35856 return default_elf_select_rtx_section (mode, x, align);
35859 /* For a SYMBOL_REF, set generic flags and then perform some
35860 target-specific processing.
35862 When the AIX ABI is requested on a non-AIX system, replace the
35863 function name with the real name (with a leading .) rather than the
35864 function descriptor name. This saves a lot of overriding code to
35865 read the prefixes. */
35867 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
35868 static void
35869 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
35871 default_encode_section_info (decl, rtl, first);
35873 if (first
35874 && TREE_CODE (decl) == FUNCTION_DECL
35875 && !TARGET_AIX
35876 && DEFAULT_ABI == ABI_AIX)
35878 rtx sym_ref = XEXP (rtl, 0);
35879 size_t len = strlen (XSTR (sym_ref, 0));
35880 char *str = XALLOCAVEC (char, len + 2);
35881 str[0] = '.';
35882 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
35883 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
35887 static inline bool
35888 compare_section_name (const char *section, const char *templ)
35890 int len;
35892 len = strlen (templ);
35893 return (strncmp (section, templ, len) == 0
35894 && (section[len] == 0 || section[len] == '.'));
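
/* Illustrative sketch, not part of the build: what the prefix test
   above accepts.  A template matches the exact section name, or the
   name followed by '.' and a suffix, but never a longer identifier.  */

static bool
compare_section_name_examples (void)
{
  return (compare_section_name (".sdata", ".sdata")	   /* true  */
	  && compare_section_name (".sdata.foo", ".sdata") /* true  */
	  && !compare_section_name (".sdata2", ".sdata")); /* false */
}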
35897 bool
35898 rs6000_elf_in_small_data_p (const_tree decl)
35900 if (rs6000_sdata == SDATA_NONE)
35901 return false;
35903 /* We want to merge strings, so we never consider them small data. */
35904 if (TREE_CODE (decl) == STRING_CST)
35905 return false;
35907 /* Functions are never in the small data area. */
35908 if (TREE_CODE (decl) == FUNCTION_DECL)
35909 return false;
35911 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
35913 const char *section = DECL_SECTION_NAME (decl);
35914 if (compare_section_name (section, ".sdata")
35915 || compare_section_name (section, ".sdata2")
35916 || compare_section_name (section, ".gnu.linkonce.s")
35917 || compare_section_name (section, ".sbss")
35918 || compare_section_name (section, ".sbss2")
35919 || compare_section_name (section, ".gnu.linkonce.sb")
35920 || strcmp (section, ".PPC.EMB.sdata0") == 0
35921 || strcmp (section, ".PPC.EMB.sbss0") == 0)
35922 return true;
35924 else
35926 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
35928 if (size > 0
35929 && size <= g_switch_value
35930 /* If it's not public, and we're not going to reference it there,
35931 there's no need to put it in the small data section. */
35932 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
35933 return true;
35936 return false;
35939 #endif /* USING_ELFOS_H */
35941 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
35943 static bool
35944 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
35946 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
35949 /* Do not place thread-local symbols refs in the object blocks. */
35951 static bool
35952 rs6000_use_blocks_for_decl_p (const_tree decl)
35954 return !DECL_THREAD_LOCAL_P (decl);
35957 /* Return a REG that occurs in ADDR with coefficient 1.
35958 ADDR can be effectively incremented by incrementing REG.
35960 r0 is special and we must not select it as an address
35961 register by this routine since our caller will try to
35962 increment the returned register via an "la" instruction. */
35965 find_addr_reg (rtx addr)
35967 while (GET_CODE (addr) == PLUS)
35969 if (GET_CODE (XEXP (addr, 0)) == REG
35970 && REGNO (XEXP (addr, 0)) != 0)
35971 addr = XEXP (addr, 0);
35972 else if (GET_CODE (XEXP (addr, 1)) == REG
35973 && REGNO (XEXP (addr, 1)) != 0)
35974 addr = XEXP (addr, 1);
35975 else if (CONSTANT_P (XEXP (addr, 0)))
35976 addr = XEXP (addr, 1);
35977 else if (CONSTANT_P (XEXP (addr, 1)))
35978 addr = XEXP (addr, 0);
35979 else
35980 gcc_unreachable ();
35982 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
35983 return addr;
35986 void
35987 rs6000_fatal_bad_address (rtx op)
35989 fatal_insn ("bad address", op);
35992 #if TARGET_MACHO
35994 typedef struct branch_island_d {
35995 tree function_name;
35996 tree label_name;
35997 int line_number;
35998 } branch_island;
36001 static vec<branch_island, va_gc> *branch_islands;
36003 /* Remember to generate a branch island for far calls to the given
36004 function. */
36006 static void
36007 add_compiler_branch_island (tree label_name, tree function_name,
36008 int line_number)
36010 branch_island bi = {function_name, label_name, line_number};
36011 vec_safe_push (branch_islands, bi);
36014 /* Generate far-jump branch islands for everything recorded in
36015 branch_islands. Invoked immediately after the last instruction of
36016 the epilogue has been emitted; the branch islands must be appended
36017 to, and contiguous with, the function body. Mach-O stubs are
36018 generated in machopic_output_stub(). */
36020 static void
36021 macho_branch_islands (void)
36023 char tmp_buf[512];
36025 while (!vec_safe_is_empty (branch_islands))
36027 branch_island *bi = &branch_islands->last ();
36028 const char *label = IDENTIFIER_POINTER (bi->label_name);
36029 const char *name = IDENTIFIER_POINTER (bi->function_name);
36030 char name_buf[512];
36031 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
36032 if (name[0] == '*' || name[0] == '&')
36033 strcpy (name_buf, name+1);
36034 else
36036 name_buf[0] = '_';
36037 strcpy (name_buf+1, name);
36039 strcpy (tmp_buf, "\n");
36040 strcat (tmp_buf, label);
36041 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
36042 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
36043 dbxout_stabd (N_SLINE, bi->line_number);
36044 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
36045 if (flag_pic)
36047 if (TARGET_LINK_STACK)
36049 char name[32];
36050 get_ppc476_thunk_name (name);
36051 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
36052 strcat (tmp_buf, name);
36053 strcat (tmp_buf, "\n");
36054 strcat (tmp_buf, label);
36055 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
36057 else
36059 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
36060 strcat (tmp_buf, label);
36061 strcat (tmp_buf, "_pic\n");
36062 strcat (tmp_buf, label);
36063 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
36066 strcat (tmp_buf, "\taddis r11,r11,ha16(");
36067 strcat (tmp_buf, name_buf);
36068 strcat (tmp_buf, " - ");
36069 strcat (tmp_buf, label);
36070 strcat (tmp_buf, "_pic)\n");
36072 strcat (tmp_buf, "\tmtlr r0\n");
36074 strcat (tmp_buf, "\taddi r12,r11,lo16(");
36075 strcat (tmp_buf, name_buf);
36076 strcat (tmp_buf, " - ");
36077 strcat (tmp_buf, label);
36078 strcat (tmp_buf, "_pic)\n");
36080 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
36082 else
36084 strcat (tmp_buf, ":\nlis r12,hi16(");
36085 strcat (tmp_buf, name_buf);
36086 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
36087 strcat (tmp_buf, name_buf);
36088 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
36090 output_asm_insn (tmp_buf, 0);
36091 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
36092 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
36093 dbxout_stabd (N_SLINE, bi->line_number);
36094 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
36095 branch_islands->pop ();
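
/* Illustrative sketch, not part of the build: the non-PIC island text
   the loop above pieces together for a label "L42" that targets
   "_foo" (the PIC variant instead materializes the address relative
   to a "L42_pic" base).  */

static const char macho_island_text_sketch[] =
  "\n"
  "L42:\n"
  "lis r12,hi16(_foo)\n"
  "\tori r12,r12,lo16(_foo)\n"
  "\tmtctr r12\n"
  "\tbctr";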
36099 /* NO_PREVIOUS_DEF checks whether the function name is already
36100 present in the branch island list. */
36102 static int
36103 no_previous_def (tree function_name)
36105 branch_island *bi;
36106 unsigned ix;
36108 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
36109 if (function_name == bi->function_name)
36110 return 0;
36111 return 1;
36114 /* GET_PREV_LABEL gets the label name from the previous definition of
36115 the function. */
36117 static tree
36118 get_prev_label (tree function_name)
36120 branch_island *bi;
36121 unsigned ix;
36123 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
36124 if (function_name == bi->function_name)
36125 return bi->label_name;
36126 return NULL_TREE;
36129 /* INSN is either a function call or a millicode call. It may have an
36130 unconditional jump in its delay slot.
36132 CALL_DEST is the routine we are calling. */
36134 char *
36135 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
36136 int cookie_operand_number)
36138 static char buf[256];
36139 if (darwin_emit_branch_islands
36140 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
36141 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
36143 tree labelname;
36144 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
36146 if (no_previous_def (funname))
36148 rtx label_rtx = gen_label_rtx ();
36149 char *label_buf, temp_buf[256];
36150 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
36151 CODE_LABEL_NUMBER (label_rtx));
36152 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
36153 labelname = get_identifier (label_buf);
36154 add_compiler_branch_island (labelname, funname, insn_line (insn));
36156 else
36157 labelname = get_prev_label (funname);
36159 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
36160 instruction will reach 'foo', otherwise link as 'bl L42'".
36161 "L42" should be a 'branch island', that will do a far jump to
36162 'foo'. Branch islands are generated in
36163 macho_branch_islands(). */
36164 sprintf (buf, "jbsr %%z%d,%.246s",
36165 dest_operand_number, IDENTIFIER_POINTER (labelname));
36167 else
36168 sprintf (buf, "bl %%z%d", dest_operand_number);
36169 return buf;
36172 /* Generate PIC and indirect symbol stubs. */
36174 void
36175 machopic_output_stub (FILE *file, const char *symb, const char *stub)
36177 unsigned int length;
36178 char *symbol_name, *lazy_ptr_name;
36179 char *local_label_0;
36180 static int label = 0;
36182 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
36183 symb = (*targetm.strip_name_encoding) (symb);
36186 length = strlen (symb);
36187 symbol_name = XALLOCAVEC (char, length + 32);
36188 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
36190 lazy_ptr_name = XALLOCAVEC (char, length + 32);
36191 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
36193 if (flag_pic == 2)
36194 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
36195 else
36196 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
36198 if (flag_pic == 2)
36200 fprintf (file, "\t.align 5\n");
36202 fprintf (file, "%s:\n", stub);
36203 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36205 label++;
36206 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
36207 sprintf (local_label_0, "\"L%011d$spb\"", label);
36209 fprintf (file, "\tmflr r0\n");
36210 if (TARGET_LINK_STACK)
36212 char name[32];
36213 get_ppc476_thunk_name (name);
36214 fprintf (file, "\tbl %s\n", name);
36215 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
36217 else
36219 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
36220 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
36222 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
36223 lazy_ptr_name, local_label_0);
36224 fprintf (file, "\tmtlr r0\n");
36225 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
36226 (TARGET_64BIT ? "ldu" : "lwzu"),
36227 lazy_ptr_name, local_label_0);
36228 fprintf (file, "\tmtctr r12\n");
36229 fprintf (file, "\tbctr\n");
36231 else
36233 fprintf (file, "\t.align 4\n");
36235 fprintf (file, "%s:\n", stub);
36236 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36238 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
36239 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
36240 (TARGET_64BIT ? "ldu" : "lwzu"),
36241 lazy_ptr_name);
36242 fprintf (file, "\tmtctr r12\n");
36243 fprintf (file, "\tbctr\n");
36246 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
36247 fprintf (file, "%s:\n", lazy_ptr_name);
36248 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36249 fprintf (file, "%sdyld_stub_binding_helper\n",
36250 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
36253 /* Legitimize PIC addresses. If the address is already
36254 position-independent, we return ORIG. Newly generated
36255 position-independent addresses go into a reg. This is REG if non
36256 zero, otherwise we allocate register(s) as necessary. */
36258 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
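
/* Illustrative sketch, not part of the build: SMALL_INT is the classic
   unsigned-wraparound range check.  Adding 0x8000 maps the signed
   range [-0x8000, 0x7fff] onto [0, 0xffff], so one unsigned compare
   replaces two signed ones:
       -0x8000 + 0x8000 = 0x00000  -> in range
        0x7fff + 0x8000 = 0x0ffff  -> in range
        0x8000 + 0x8000 = 0x10000  -> out of range  */

static int
small_int_sketch (long long x)
{
  return ((unsigned long long) x + 0x8000) < 0x10000;
}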
36261 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
36262 rtx reg)
36264 rtx base, offset;
36266 if (reg == NULL && ! reload_in_progress && ! reload_completed)
36267 reg = gen_reg_rtx (Pmode);
36269 if (GET_CODE (orig) == CONST)
36271 rtx reg_temp;
36273 if (GET_CODE (XEXP (orig, 0)) == PLUS
36274 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
36275 return orig;
36277 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
36279 /* Use a different reg for the intermediate value, as
36280 it will be marked UNCHANGING. */
36281 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
36282 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
36283 Pmode, reg_temp);
36284 offset =
36285 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
36286 Pmode, reg);
36288 if (GET_CODE (offset) == CONST_INT)
36290 if (SMALL_INT (offset))
36291 return plus_constant (Pmode, base, INTVAL (offset));
36292 else if (! reload_in_progress && ! reload_completed)
36293 offset = force_reg (Pmode, offset);
36294 else
36296 rtx mem = force_const_mem (Pmode, orig);
36297 return machopic_legitimize_pic_address (mem, Pmode, reg);
36300 return gen_rtx_PLUS (Pmode, base, offset);
36303 /* Fall back on generic machopic code. */
36304 return machopic_legitimize_pic_address (orig, mode, reg);
36307 /* Output a .machine directive for the Darwin assembler, and call
36308 the generic start_file routine. */
36310 static void
36311 rs6000_darwin_file_start (void)
36313 static const struct
36315 const char *arg;
36316 const char *name;
36317 HOST_WIDE_INT if_set;
36318 } mapping[] = {
36319 { "ppc64", "ppc64", MASK_64BIT },
36320 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
36321 { "power4", "ppc970", 0 },
36322 { "G5", "ppc970", 0 },
36323 { "7450", "ppc7450", 0 },
36324 { "7400", "ppc7400", MASK_ALTIVEC },
36325 { "G4", "ppc7400", 0 },
36326 { "750", "ppc750", 0 },
36327 { "740", "ppc750", 0 },
36328 { "G3", "ppc750", 0 },
36329 { "604e", "ppc604e", 0 },
36330 { "604", "ppc604", 0 },
36331 { "603e", "ppc603", 0 },
36332 { "603", "ppc603", 0 },
36333 { "601", "ppc601", 0 },
36334 { NULL, "ppc", 0 } };
36335 const char *cpu_id = "";
36336 size_t i;
36338 rs6000_file_start ();
36339 darwin_file_start ();
36341 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
36343 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
36344 cpu_id = rs6000_default_cpu;
36346 if (global_options_set.x_rs6000_cpu_index)
36347 cpu_id = processor_target_table[rs6000_cpu_index].name;
36349 /* Look through the mapping array. Pick the first name that either
36350 matches the argument, has a bit set in IF_SET that is also set
36351 in the target flags, or has a NULL name. */
36353 i = 0;
36354 while (mapping[i].arg != NULL
36355 && strcmp (mapping[i].arg, cpu_id) != 0
36356 && (mapping[i].if_set & rs6000_isa_flags) == 0)
36357 i++;
36359 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
36362 #endif /* TARGET_MACHO */
36364 #if TARGET_ELF
36365 static int
36366 rs6000_elf_reloc_rw_mask (void)
36368 if (flag_pic)
36369 return 3;
36370 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
36371 return 2;
36372 else
36373 return 0;
36376 /* Record an element in the table of global constructors. SYMBOL is
36377 a SYMBOL_REF of the function to be called; PRIORITY is a number
36378 between 0 and MAX_INIT_PRIORITY.
36380 This differs from default_named_section_asm_out_constructor in
36381 that we have special handling for -mrelocatable. */
36383 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
36384 static void
36385 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
36387 const char *section = ".ctors";
36388 char buf[18];
36390 if (priority != DEFAULT_INIT_PRIORITY)
36392 sprintf (buf, ".ctors.%.5u",
36393 /* Invert the numbering so the linker puts us in the proper
36394 order; constructors are run from right to left, and the
36395 linker sorts in increasing order. */
36396 MAX_INIT_PRIORITY - priority);
36397 section = buf;
36400 switch_to_section (get_section (section, SECTION_WRITE, NULL));
36401 assemble_align (POINTER_SIZE);
36403 if (DEFAULT_ABI == ABI_V4
36404 && (TARGET_RELOCATABLE || flag_pic > 1))
36406 fputs ("\t.long (", asm_out_file);
36407 output_addr_const (asm_out_file, symbol);
36408 fputs (")@fixup\n", asm_out_file);
36410 else
36411 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
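
/* Illustrative sketch, not part of the build: the inverted naming used
   above, assuming MAX_INIT_PRIORITY is 65535 as in tree.h.  A low
   (early-running) priority yields a large suffix; the linker sorts
   the sections in increasing name order and constructors run right
   to left, so the early priorities are reached first.  */

static void
ctor_section_name_sketch (char buf[18], unsigned int priority)
{
  /* priority   101 -> ".ctors.65434" (runs early)
     priority 65535 -> ".ctors.00000" (runs last)  */
  sprintf (buf, ".ctors.%.5u", 65535 - priority);
}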
36414 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
36415 static void
36416 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
36418 const char *section = ".dtors";
36419 char buf[18];
36421 if (priority != DEFAULT_INIT_PRIORITY)
36423 sprintf (buf, ".dtors.%.5u",
36424 /* Invert the numbering so the linker puts us in the proper
36425 order; constructors are run from right to left, and the
36426 linker sorts in increasing order. */
36427 MAX_INIT_PRIORITY - priority);
36428 section = buf;
36431 switch_to_section (get_section (section, SECTION_WRITE, NULL));
36432 assemble_align (POINTER_SIZE);
36434 if (DEFAULT_ABI == ABI_V4
36435 && (TARGET_RELOCATABLE || flag_pic > 1))
36437 fputs ("\t.long (", asm_out_file);
36438 output_addr_const (asm_out_file, symbol);
36439 fputs (")@fixup\n", asm_out_file);
36441 else
36442 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
36445 void
36446 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
36448 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
36450 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
36451 ASM_OUTPUT_LABEL (file, name);
36452 fputs (DOUBLE_INT_ASM_OP, file);
36453 rs6000_output_function_entry (file, name);
36454 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
36455 if (DOT_SYMBOLS)
36457 fputs ("\t.size\t", file);
36458 assemble_name (file, name);
36459 fputs (",24\n\t.type\t.", file);
36460 assemble_name (file, name);
36461 fputs (",@function\n", file);
36462 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
36464 fputs ("\t.globl\t.", file);
36465 assemble_name (file, name);
36466 putc ('\n', file);
36469 else
36470 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
36471 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
36472 rs6000_output_function_entry (file, name);
36473 fputs (":\n", file);
36474 return;
36477 if (DEFAULT_ABI == ABI_V4
36478 && (TARGET_RELOCATABLE || flag_pic > 1)
36479 && !TARGET_SECURE_PLT
36480 && (!constant_pool_empty_p () || crtl->profile)
36481 && uses_TOC ())
36483 char buf[256];
36485 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
36487 fprintf (file, "\t.long ");
36488 assemble_name (file, toc_label_name);
36489 need_toc_init = 1;
36490 putc ('-', file);
36491 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
36492 assemble_name (file, buf);
36493 putc ('\n', file);
36496 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
36497 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
36499 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
36501 char buf[256];
36503 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
36505 fprintf (file, "\t.quad .TOC.-");
36506 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
36507 assemble_name (file, buf);
36508 putc ('\n', file);
36511 if (DEFAULT_ABI == ABI_AIX)
36513 const char *desc_name, *orig_name;
36515 orig_name = (*targetm.strip_name_encoding) (name);
36516 desc_name = orig_name;
36517 while (*desc_name == '.')
36518 desc_name++;
36520 if (TREE_PUBLIC (decl))
36521 fprintf (file, "\t.globl %s\n", desc_name);
36523 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
36524 fprintf (file, "%s:\n", desc_name);
36525 fprintf (file, "\t.long %s\n", orig_name);
36526 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
36527 fputs ("\t.long 0\n", file);
36528 fprintf (file, "\t.previous\n");
36530 ASM_OUTPUT_LABEL (file, name);
36533 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
36534 static void
36535 rs6000_elf_file_end (void)
36537 #ifdef HAVE_AS_GNU_ATTRIBUTE
36538 /* ??? The value emitted depends on options active at file end.
36539 Assume anyone using #pragma or attributes that might change
36540 options knows what they are doing. */
36541 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
36542 && rs6000_passes_float)
36544 int fp;
36546 if (TARGET_DF_FPR | TARGET_DF_SPE)
36547 fp = 1;
36548 else if (TARGET_SF_FPR | TARGET_SF_SPE)
36549 fp = 3;
36550 else
36551 fp = 2;
36552 if (rs6000_passes_long_double)
36554 if (!TARGET_LONG_DOUBLE_128)
36555 fp |= 2 * 4;
36556 else if (TARGET_IEEEQUAD)
36557 fp |= 3 * 4;
36558 else
36559 fp |= 1 * 4;
36561 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
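/* Illustrative decoding of the attribute just emitted (a hedged
   sketch inferred from the code above, not part of the upstream
   comment): the low two bits encode the FP ABI (1 = hard double,
   2 = soft, 3 = single-only) and bits 2-3 the long double format
   when long doubles are passed.  A hard-float target passing
   128-bit IBM long double would emit ".gnu_attribute 4, 5"
   (1 | 1*4); a soft-float target with 64-bit long double would
   emit ".gnu_attribute 4, 10" (2 | 2*4).  */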
36563 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
36565 if (rs6000_passes_vector)
36566 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
36567 (TARGET_ALTIVEC_ABI ? 2
36568 : TARGET_SPE_ABI ? 3
36569 : 1));
36570 if (rs6000_returns_struct)
36571 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
36572 aix_struct_return ? 2 : 1);
36574 #endif
36575 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
36576 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
36577 file_end_indicate_exec_stack ();
36578 #endif
36580 if (flag_split_stack)
36581 file_end_indicate_split_stack ();
36583 if (cpu_builtin_p)
36585 /* We have expanded a CPU builtin, so we need to emit a reference to
36586 the special symbol that LIBC uses to declare that it supports
36587 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB. */
36588 switch_to_section (data_section);
36589 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
36590 fprintf (asm_out_file, "\t%s %s\n",
36591 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
36594 #endif
36596 #if TARGET_XCOFF
36598 #ifndef HAVE_XCOFF_DWARF_EXTRAS
36599 #define HAVE_XCOFF_DWARF_EXTRAS 0
36600 #endif
36602 static enum unwind_info_type
36603 rs6000_xcoff_debug_unwind_info (void)
36605 return UI_NONE;
36608 static void
36609 rs6000_xcoff_asm_output_anchor (rtx symbol)
36611 char buffer[100];
36613 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
36614 SYMBOL_REF_BLOCK_OFFSET (symbol));
36615 fprintf (asm_out_file, "%s", SET_ASM_OP);
36616 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
36617 fprintf (asm_out_file, ",");
36618 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
36619 fprintf (asm_out_file, "\n");
36622 static void
36623 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
36625 fputs (GLOBAL_ASM_OP, stream);
36626 RS6000_OUTPUT_BASENAME (stream, name);
36627 putc ('\n', stream);
36630 /* A get_unnamed_section callback, used for read-only sections.
36631 DIRECTIVE points to the section string variable. */
36633 static void
36634 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
36636 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
36637 *(const char *const *) directive,
36638 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36641 /* Likewise for read-write sections. */
36643 static void
36644 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
36646 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
36647 *(const char *const *) directive,
36648 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36651 static void
36652 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
36654 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
36655 *(const char *const *) directive,
36656 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36659 /* A get_unnamed_section callback, used for switching to toc_section. */
36661 static void
36662 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
36664 if (TARGET_MINIMAL_TOC)
36666 /* toc_section is always selected at least once from
36667 rs6000_xcoff_file_start, so this is guaranteed to
36668 be defined exactly once in each file. */
36669 if (!toc_initialized)
36671 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
36672 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
36673 toc_initialized = 1;
36675 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
36676 (TARGET_32BIT ? "" : ",3"));
36678 else
36679 fputs ("\t.toc\n", asm_out_file);
36682 /* Implement TARGET_ASM_INIT_SECTIONS. */
36684 static void
36685 rs6000_xcoff_asm_init_sections (void)
36687 read_only_data_section
36688 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
36689 &xcoff_read_only_section_name);
36691 private_data_section
36692 = get_unnamed_section (SECTION_WRITE,
36693 rs6000_xcoff_output_readwrite_section_asm_op,
36694 &xcoff_private_data_section_name);
36696 tls_data_section
36697 = get_unnamed_section (SECTION_TLS,
36698 rs6000_xcoff_output_tls_section_asm_op,
36699 &xcoff_tls_data_section_name);
36701 tls_private_data_section
36702 = get_unnamed_section (SECTION_TLS,
36703 rs6000_xcoff_output_tls_section_asm_op,
36704 &xcoff_private_data_section_name);
36706 read_only_private_data_section
36707 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
36708 &xcoff_private_data_section_name);
36710 toc_section
36711 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
36713 readonly_data_section = read_only_data_section;
36716 static int
36717 rs6000_xcoff_reloc_rw_mask (void)
36719 return 3;
36722 static void
36723 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
36724 tree decl ATTRIBUTE_UNUSED)
36726 int smclass;
36727 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
36729 if (flags & SECTION_EXCLUDE)
36730 smclass = 4;
36731 else if (flags & SECTION_DEBUG)
36733 fprintf (asm_out_file, "\t.dwsect %s\n", name);
36734 return;
36736 else if (flags & SECTION_CODE)
36737 smclass = 0;
36738 else if (flags & SECTION_TLS)
36739 smclass = 3;
36740 else if (flags & SECTION_WRITE)
36741 smclass = 2;
36742 else
36743 smclass = 1;
36745 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
36746 (flags & SECTION_CODE) ? "." : "",
36747 name, suffix[smclass], flags & SECTION_ENTSIZE);
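/* Worked examples of the mapping above (illustrative, using a
   hypothetical section name "foo"): SECTION_CODE selects smclass 0
   and emits "\t.csect .foo[PR],0", while a plain writable section
   selects smclass 2 and emits "\t.csect foo[RW],0" (both assuming
   the SECTION_ENTSIZE field of FLAGS is zero).  */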
36750 #define IN_NAMED_SECTION(DECL) \
36751 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
36752 && DECL_SECTION_NAME (DECL) != NULL)
36754 static section *
36755 rs6000_xcoff_select_section (tree decl, int reloc,
36756 unsigned HOST_WIDE_INT align)
36758 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
36759 named section. */
36760 if (align > BIGGEST_ALIGNMENT)
36762 resolve_unique_section (decl, reloc, true);
36763 if (IN_NAMED_SECTION (decl))
36764 return get_named_section (decl, NULL, reloc);
36767 if (decl_readonly_section (decl, reloc))
36769 if (TREE_PUBLIC (decl))
36770 return read_only_data_section;
36771 else
36772 return read_only_private_data_section;
36774 else
36776 #if HAVE_AS_TLS
36777 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
36779 if (TREE_PUBLIC (decl))
36780 return tls_data_section;
36781 else if (bss_initializer_p (decl))
36783 /* Convert to COMMON to emit in BSS. */
36784 DECL_COMMON (decl) = 1;
36785 return tls_comm_section;
36787 else
36788 return tls_private_data_section;
36790 else
36791 #endif
36792 if (TREE_PUBLIC (decl))
36793 return data_section;
36794 else
36795 return private_data_section;
36799 static void
36800 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
36802 const char *name;
36804 /* Use select_section for private data and uninitialized data with
36805 alignment <= BIGGEST_ALIGNMENT. */
36806 if (!TREE_PUBLIC (decl)
36807 || DECL_COMMON (decl)
36808 || (DECL_INITIAL (decl) == NULL_TREE
36809 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
36810 || DECL_INITIAL (decl) == error_mark_node
36811 || (flag_zero_initialized_in_bss
36812 && initializer_zerop (DECL_INITIAL (decl))))
36813 return;
36815 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
36816 name = (*targetm.strip_name_encoding) (name);
36817 set_decl_section_name (decl, name);
36820 /* Select section for constant in constant pool.
36822 On RS/6000, all constants are in the private read-only data area.
36823 However, if this is being placed in the TOC it must be output as a
36824 toc entry. */
36826 static section *
36827 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
36828 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
36830 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
36831 return toc_section;
36832 else
36833 return read_only_private_data_section;
36836 /* Remove any trailing [DS] or the like from the symbol name. */
36838 static const char *
36839 rs6000_xcoff_strip_name_encoding (const char *name)
36841 size_t len;
36842 if (*name == '*')
36843 name++;
36844 len = strlen (name);
36845 if (name[len - 1] == ']')
36846 return ggc_alloc_string (name, len - 4);
36847 else
36848 return name;
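/* For example (illustrative): "foo[DS]" is returned as "foo", and
   "*bar[RW]" first drops the '*' and then the mapping class, giving
   "bar".  Note this relies on every trailing mapping class being
   exactly four characters, e.g. "[DS]", "[RW]", "[UA]".  */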
36851 /* Section attributes. AIX is always PIC. */
36853 static unsigned int
36854 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
36856 unsigned int align;
36857 unsigned int flags = default_section_type_flags (decl, name, reloc);
36859 /* Align to at least UNIT size. */
36860 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
36861 align = MIN_UNITS_PER_WORD;
36862 else
36863 /* Increase alignment of large objects if not already stricter. */
36864 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
36865 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
36866 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
36868 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
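/* Worked example (illustrative): a 16-byte-aligned aggregate larger
   than one word keeps its 16-byte DECL alignment (it is already
   stricter than UNITS_PER_FP_WORD), so exact_log2 (16) == 4 is
   stored in the SECTION_ENTSIZE bits of the returned flags; code
   sections always use MIN_UNITS_PER_WORD.  */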
36871 /* Output at beginning of assembler file.
36873 Initialize the section names for the RS/6000 at this point.
36875 Specify filename, including full path, to assembler.
36877 We want to go into the TOC section so at least one .toc will be emitted.
36878 Also, in order to output proper .bs/.es pairs, we need at least one static
36879 [RW] section emitted.
36881 Finally, declare mcount when profiling to make the assembler happy. */
36883 static void
36884 rs6000_xcoff_file_start (void)
36886 rs6000_gen_section_name (&xcoff_bss_section_name,
36887 main_input_filename, ".bss_");
36888 rs6000_gen_section_name (&xcoff_private_data_section_name,
36889 main_input_filename, ".rw_");
36890 rs6000_gen_section_name (&xcoff_read_only_section_name,
36891 main_input_filename, ".ro_");
36892 rs6000_gen_section_name (&xcoff_tls_data_section_name,
36893 main_input_filename, ".tls_");
36894 rs6000_gen_section_name (&xcoff_tbss_section_name,
36895 main_input_filename, ".tbss_[UL]");
36897 fputs ("\t.file\t", asm_out_file);
36898 output_quoted_string (asm_out_file, main_input_filename);
36899 fputc ('\n', asm_out_file);
36900 if (write_symbols != NO_DEBUG)
36901 switch_to_section (private_data_section);
36902 switch_to_section (toc_section);
36903 switch_to_section (text_section);
36904 if (profile_flag)
36905 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
36906 rs6000_file_start ();
36909 /* Output at end of assembler file.
36910 On the RS/6000, referencing data should automatically pull in text. */
36912 static void
36913 rs6000_xcoff_file_end (void)
36915 switch_to_section (text_section);
36916 fputs ("_section_.text:\n", asm_out_file);
36917 switch_to_section (data_section);
36918 fputs (TARGET_32BIT
36919 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
36920 asm_out_file);
36923 struct declare_alias_data
36925 FILE *file;
36926 bool function_descriptor;
36929 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
36931 static bool
36932 rs6000_declare_alias (struct symtab_node *n, void *d)
36934 struct declare_alias_data *data = (struct declare_alias_data *)d;
36935 /* The main symbol is output specially, because the varasm machinery does
36936 part of the job for us; we do not need to emit .globl/.lglobl here. */
36937 if (!n->alias || n->weakref)
36938 return false;
36940 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
36941 return false;
36943 /* Prevent assemble_alias from trying to use .set pseudo operation
36944 that does not behave as expected by the middle-end. */
36945 TREE_ASM_WRITTEN (n->decl) = true;
36947 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
36948 char *buffer = (char *) alloca (strlen (name) + 2);
36949 char *p;
36950 int dollar_inside = 0;
36952 strcpy (buffer, name);
36953 p = strchr (buffer, '$');
36954 while (p) {
36955 *p = '_';
36956 dollar_inside++;
36957 p = strchr (p + 1, '$');
36959 if (TREE_PUBLIC (n->decl))
36961 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
36963 if (dollar_inside) {
36964 if (data->function_descriptor)
36965 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36966 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36968 if (data->function_descriptor)
36970 fputs ("\t.globl .", data->file);
36971 RS6000_OUTPUT_BASENAME (data->file, buffer);
36972 putc ('\n', data->file);
36974 fputs ("\t.globl ", data->file);
36975 RS6000_OUTPUT_BASENAME (data->file, buffer);
36976 putc ('\n', data->file);
36978 #ifdef ASM_WEAKEN_DECL
36979 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
36980 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
36981 #endif
36983 else
36985 if (dollar_inside)
36987 if (data->function_descriptor)
36988 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36989 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36991 if (data->function_descriptor)
36993 fputs ("\t.lglobl .", data->file);
36994 RS6000_OUTPUT_BASENAME (data->file, buffer);
36995 putc ('\n', data->file);
36997 fputs ("\t.lglobl ", data->file);
36998 RS6000_OUTPUT_BASENAME (data->file, buffer);
36999 putc ('\n', data->file);
37001 if (data->function_descriptor)
37002 fputs (".", data->file);
37003 RS6000_OUTPUT_BASENAME (data->file, buffer);
37004 fputs (":\n", data->file);
37005 return false;
37009 #ifdef HAVE_GAS_HIDDEN
37010 /* Helper function to calculate visibility of a DECL
37011 and return the value as a const string. */
37013 static const char *
37014 rs6000_xcoff_visibility (tree decl)
37016 static const char * const visibility_types[] = {
37017 "", ",protected", ",hidden", ",internal"
37020 enum symbol_visibility vis = DECL_VISIBILITY (decl);
37022 if (TREE_CODE (decl) == FUNCTION_DECL
37023 && cgraph_node::get (decl)
37024 && cgraph_node::get (decl)->instrumentation_clone
37025 && cgraph_node::get (decl)->instrumented_version)
37026 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
37028 return visibility_types[vis];
37030 #endif
37033 /* This macro produces the initial definition of a function name.
37034 On the RS/6000, we need to place an extra '.' in the function name and
37035 output the function descriptor.
37036 Dollar signs are converted to underscores.
37038 The csect for the function will have already been created when
37039 text_section was selected. We do have to go back to that csect, however.
37041 The third and fourth parameters to the .function pseudo-op (16 and 044)
37042 are placeholders which no longer have any use.
37044 Because AIX assembler's .set command has unexpected semantics, we output
37045 all aliases as alternative labels in front of the definition. */
37047 void
37048 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
37050 char *buffer = (char *) alloca (strlen (name) + 1);
37051 char *p;
37052 int dollar_inside = 0;
37053 struct declare_alias_data data = {file, false};
37055 strcpy (buffer, name);
37056 p = strchr (buffer, '$');
37057 while (p) {
37058 *p = '_';
37059 dollar_inside++;
37060 p = strchr (p + 1, '$');
37062 if (TREE_PUBLIC (decl))
37064 if (!RS6000_WEAK || !DECL_WEAK (decl))
37066 if (dollar_inside) {
37067 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
37068 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
37070 fputs ("\t.globl .", file);
37071 RS6000_OUTPUT_BASENAME (file, buffer);
37072 #ifdef HAVE_GAS_HIDDEN
37073 fputs (rs6000_xcoff_visibility (decl), file);
37074 #endif
37075 putc ('\n', file);
37078 else
37080 if (dollar_inside) {
37081 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
37082 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
37084 fputs ("\t.lglobl .", file);
37085 RS6000_OUTPUT_BASENAME (file, buffer);
37086 putc ('\n', file);
37088 fputs ("\t.csect ", file);
37089 RS6000_OUTPUT_BASENAME (file, buffer);
37090 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
37091 RS6000_OUTPUT_BASENAME (file, buffer);
37092 fputs (":\n", file);
37093 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37094 &data, true);
37095 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
37096 RS6000_OUTPUT_BASENAME (file, buffer);
37097 fputs (", TOC[tc0], 0\n", file);
37098 in_section = NULL;
37099 switch_to_section (function_section (decl));
37100 putc ('.', file);
37101 RS6000_OUTPUT_BASENAME (file, buffer);
37102 fputs (":\n", file);
37103 data.function_descriptor = true;
37104 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37105 &data, true);
37106 if (!DECL_IGNORED_P (decl))
37108 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
37109 xcoffout_declare_function (file, decl, buffer);
37110 else if (write_symbols == DWARF2_DEBUG)
37112 name = (*targetm.strip_name_encoding) (name);
37113 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
37116 return;
37120 /* Output assembly language to globalize a symbol from a DECL,
37121 possibly with visibility. */
37123 void
37124 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
37126 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
37127 fputs (GLOBAL_ASM_OP, stream);
37128 RS6000_OUTPUT_BASENAME (stream, name);
37129 #ifdef HAVE_GAS_HIDDEN
37130 fputs (rs6000_xcoff_visibility (decl), stream);
37131 #endif
37132 putc ('\n', stream);
37135 /* Output assembly language to define a symbol as COMMON from a DECL,
37136 possibly with visibility. */
37138 void
37139 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
37140 tree decl ATTRIBUTE_UNUSED,
37141 const char *name,
37142 unsigned HOST_WIDE_INT size,
37143 unsigned HOST_WIDE_INT align)
37145 unsigned HOST_WIDE_INT align2 = 2;
37147 if (align > 32)
37148 align2 = floor_log2 (align / BITS_PER_UNIT);
37149 else if (size > 4)
37150 align2 = 3;
37152 fputs (COMMON_ASM_OP, stream);
37153 RS6000_OUTPUT_BASENAME (stream, name);
37155 fprintf (stream,
37156 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
37157 size, align2);
37159 #ifdef HAVE_GAS_HIDDEN
37160 fputs (rs6000_xcoff_visibility (decl), stream);
37161 #endif
37162 putc ('\n', stream);
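/* Illustrative outputs (hedged examples): a 4-byte int with 32-bit
   alignment emits ".comm name,4,2"; an 8-byte object with alignment
   <= 32 bits emits ",8,3"; and 128-bit alignment gives
   floor_log2 (128 / 8) == 4.  The third operand is thus the log2 of
   the byte alignment.  */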
37165 /* This macro produces the initial definition of an object (variable) name.
37166 Because AIX assembler's .set command has unexpected semantics, we output
37167 all aliases as alternative labels in front of the definition. */
37169 void
37170 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
37172 struct declare_alias_data data = {file, false};
37173 RS6000_OUTPUT_BASENAME (file, name);
37174 fputs (":\n", file);
37175 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37176 &data, true);
37179 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
37181 void
37182 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
37184 fputs (integer_asm_op (size, FALSE), file);
37185 assemble_name (file, label);
37186 fputs ("-$", file);
37189 /* Output a symbol offset relative to the dbase for the current object.
37190 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
37191 signed offsets.
37193 __gcc_unwind_dbase is embedded in all executables/libraries through
37194 libgcc/config/rs6000/crtdbase.S. */
37196 void
37197 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
37199 fputs (integer_asm_op (size, FALSE), file);
37200 assemble_name (file, label);
37201 fputs("-__gcc_unwind_dbase", file);
37204 #ifdef HAVE_AS_TLS
37205 static void
37206 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
37208 rtx symbol;
37209 int flags;
37210 const char *symname;
37212 default_encode_section_info (decl, rtl, first);
37214 /* Careful not to prod global register variables. */
37215 if (!MEM_P (rtl))
37216 return;
37217 symbol = XEXP (rtl, 0);
37218 if (GET_CODE (symbol) != SYMBOL_REF)
37219 return;
37221 flags = SYMBOL_REF_FLAGS (symbol);
37223 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
37224 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
37226 SYMBOL_REF_FLAGS (symbol) = flags;
37228 /* Append mapping class to extern decls. */
37229 symname = XSTR (symbol, 0);
37230 if (decl /* sync condition with assemble_external () */
37231 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
37232 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
37233 || TREE_CODE (decl) == FUNCTION_DECL)
37234 && symname[strlen (symname) - 1] != ']')
37236 char *newname = (char *) alloca (strlen (symname) + 5);
37237 strcpy (newname, symname);
37238 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
37239 ? "[DS]" : "[UA]"));
37240 XSTR (symbol, 0) = ggc_strdup (newname);
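/* Example of the encoding above (illustrative, hypothetical names):
   an extern function "foo" is renamed "foo[DS]" (its descriptor
   csect), while an extern non-thread-local variable "bar" becomes
   "bar[UA]", giving references the XCOFF mapping class the linker
   expects.  */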
37243 #endif /* HAVE_AS_TLS */
37244 #endif /* TARGET_XCOFF */
37246 void
37247 rs6000_asm_weaken_decl (FILE *stream, tree decl,
37248 const char *name, const char *val)
37250 fputs ("\t.weak\t", stream);
37251 RS6000_OUTPUT_BASENAME (stream, name);
37252 if (decl && TREE_CODE (decl) == FUNCTION_DECL
37253 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
37255 if (TARGET_XCOFF)
37256 fputs ("[DS]", stream);
37257 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
37258 if (TARGET_XCOFF)
37259 fputs (rs6000_xcoff_visibility (decl), stream);
37260 #endif
37261 fputs ("\n\t.weak\t.", stream);
37262 RS6000_OUTPUT_BASENAME (stream, name);
37264 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
37265 if (TARGET_XCOFF)
37266 fputs (rs6000_xcoff_visibility (decl), stream);
37267 #endif
37268 fputc ('\n', stream);
37269 if (val)
37271 #ifdef ASM_OUTPUT_DEF
37272 ASM_OUTPUT_DEF (stream, name, val);
37273 #endif
37274 if (decl && TREE_CODE (decl) == FUNCTION_DECL
37275 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
37277 fputs ("\t.set\t.", stream);
37278 RS6000_OUTPUT_BASENAME (stream, name);
37279 fputs (",.", stream);
37280 RS6000_OUTPUT_BASENAME (stream, val);
37281 fputc ('\n', stream);
37287 /* Return true if INSN should not be copied. */
37289 static bool
37290 rs6000_cannot_copy_insn_p (rtx_insn *insn)
37292 return recog_memoized (insn) >= 0
37293 && get_attr_cannot_copy (insn);
37296 /* Compute a (partial) cost for rtx X. Return true if the complete
37297 cost has been computed, and false if subexpressions should be
37298 scanned. In either case, *TOTAL contains the cost result. */
37300 static bool
37301 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
37302 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
37304 int code = GET_CODE (x);
37306 switch (code)
37308 /* On the RS/6000, if it is valid in the insn, it is free. */
37309 case CONST_INT:
37310 if (((outer_code == SET
37311 || outer_code == PLUS
37312 || outer_code == MINUS)
37313 && (satisfies_constraint_I (x)
37314 || satisfies_constraint_L (x)))
37315 || (outer_code == AND
37316 && (satisfies_constraint_K (x)
37317 || (mode == SImode
37318 ? satisfies_constraint_L (x)
37319 : satisfies_constraint_J (x))))
37320 || ((outer_code == IOR || outer_code == XOR)
37321 && (satisfies_constraint_K (x)
37322 || (mode == SImode
37323 ? satisfies_constraint_L (x)
37324 : satisfies_constraint_J (x))))
37325 || outer_code == ASHIFT
37326 || outer_code == ASHIFTRT
37327 || outer_code == LSHIFTRT
37328 || outer_code == ROTATE
37329 || outer_code == ROTATERT
37330 || outer_code == ZERO_EXTRACT
37331 || (outer_code == MULT
37332 && satisfies_constraint_I (x))
37333 || ((outer_code == DIV || outer_code == UDIV
37334 || outer_code == MOD || outer_code == UMOD)
37335 && exact_log2 (INTVAL (x)) >= 0)
37336 || (outer_code == COMPARE
37337 && (satisfies_constraint_I (x)
37338 || satisfies_constraint_K (x)))
37339 || ((outer_code == EQ || outer_code == NE)
37340 && (satisfies_constraint_I (x)
37341 || satisfies_constraint_K (x)
37342 || (mode == SImode
37343 ? satisfies_constraint_L (x)
37344 : satisfies_constraint_J (x))))
37345 || (outer_code == GTU
37346 && satisfies_constraint_I (x))
37347 || (outer_code == LTU
37348 && satisfies_constraint_P (x)))
37350 *total = 0;
37351 return true;
37353 else if ((outer_code == PLUS
37354 && reg_or_add_cint_operand (x, VOIDmode))
37355 || (outer_code == MINUS
37356 && reg_or_sub_cint_operand (x, VOIDmode))
37357 || ((outer_code == SET
37358 || outer_code == IOR
37359 || outer_code == XOR)
37360 && (INTVAL (x)
37361 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
37363 *total = COSTS_N_INSNS (1);
37364 return true;
37366 /* FALLTHRU */
37368 case CONST_DOUBLE:
37369 case CONST_WIDE_INT:
37370 case CONST:
37371 case HIGH:
37372 case SYMBOL_REF:
37373 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
37374 return true;
37376 case MEM:
37377 /* When optimizing for size, MEM should be slightly more expensive
37378 than generating the address, e.g., (plus (reg) (const)).
37379 L1 cache latency is about two instructions. */
37380 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
37381 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
37382 *total += COSTS_N_INSNS (100);
37383 return true;
37385 case LABEL_REF:
37386 *total = 0;
37387 return true;
37389 case PLUS:
37390 case MINUS:
37391 if (FLOAT_MODE_P (mode))
37392 *total = rs6000_cost->fp;
37393 else
37394 *total = COSTS_N_INSNS (1);
37395 return false;
37397 case MULT:
37398 if (GET_CODE (XEXP (x, 1)) == CONST_INT
37399 && satisfies_constraint_I (XEXP (x, 1)))
37401 if (INTVAL (XEXP (x, 1)) >= -256
37402 && INTVAL (XEXP (x, 1)) <= 255)
37403 *total = rs6000_cost->mulsi_const9;
37404 else
37405 *total = rs6000_cost->mulsi_const;
37407 else if (mode == SFmode)
37408 *total = rs6000_cost->fp;
37409 else if (FLOAT_MODE_P (mode))
37410 *total = rs6000_cost->dmul;
37411 else if (mode == DImode)
37412 *total = rs6000_cost->muldi;
37413 else
37414 *total = rs6000_cost->mulsi;
37415 return false;
37417 case FMA:
37418 if (mode == SFmode)
37419 *total = rs6000_cost->fp;
37420 else
37421 *total = rs6000_cost->dmul;
37422 break;
37424 case DIV:
37425 case MOD:
37426 if (FLOAT_MODE_P (mode))
37428 *total = mode == DFmode ? rs6000_cost->ddiv
37429 : rs6000_cost->sdiv;
37430 return false;
37432 /* FALLTHRU */
37434 case UDIV:
37435 case UMOD:
37436 if (GET_CODE (XEXP (x, 1)) == CONST_INT
37437 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
37439 if (code == DIV || code == MOD)
37440 /* Shift, addze */
37441 *total = COSTS_N_INSNS (2);
37442 else
37443 /* Shift */
37444 *total = COSTS_N_INSNS (1);
37446 else
37448 if (GET_MODE (XEXP (x, 1)) == DImode)
37449 *total = rs6000_cost->divdi;
37450 else
37451 *total = rs6000_cost->divsi;
37453 /* Add in shift and subtract for MOD unless we have a mod instruction. */
37454 if (!TARGET_MODULO && (code == MOD || code == UMOD))
37455 *total += COSTS_N_INSNS (2);
37456 return false;
37458 case CTZ:
37459 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
37460 return false;
37462 case FFS:
37463 *total = COSTS_N_INSNS (4);
37464 return false;
37466 case POPCOUNT:
37467 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
37468 return false;
37470 case PARITY:
37471 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
37472 return false;
37474 case NOT:
37475 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
37476 *total = 0;
37477 else
37478 *total = COSTS_N_INSNS (1);
37479 return false;
37481 case AND:
37482 if (CONST_INT_P (XEXP (x, 1)))
37484 rtx left = XEXP (x, 0);
37485 rtx_code left_code = GET_CODE (left);
37487 /* rotate-and-mask: 1 insn. */
37488 if ((left_code == ROTATE
37489 || left_code == ASHIFT
37490 || left_code == LSHIFTRT)
37491 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
37493 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
37494 if (!CONST_INT_P (XEXP (left, 1)))
37495 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
37496 *total += COSTS_N_INSNS (1);
37497 return true;
37500 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
37501 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
37502 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
37503 || (val & 0xffff) == val
37504 || (val & 0xffff0000) == val
37505 || ((val & 0xffff) == 0 && mode == SImode))
37507 *total = rtx_cost (left, mode, AND, 0, speed);
37508 *total += COSTS_N_INSNS (1);
37509 return true;
37512 /* 2 insns. */
37513 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
37515 *total = rtx_cost (left, mode, AND, 0, speed);
37516 *total += COSTS_N_INSNS (2);
37517 return true;
37521 *total = COSTS_N_INSNS (1);
37522 return false;
37524 case IOR:
37525 /* FIXME */
37526 *total = COSTS_N_INSNS (1);
37527 return true;
37529 case CLZ:
37530 case XOR:
37531 case ZERO_EXTRACT:
37532 *total = COSTS_N_INSNS (1);
37533 return false;
37535 case ASHIFT:
37536 /* The EXTSWSLI instruction is a combined instruction. Don't count both
37537 the sign extend and shift separately within the insn. */
37538 if (TARGET_EXTSWSLI && mode == DImode
37539 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
37540 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
37542 *total = 0;
37543 return false;
37545 /* fall through */
37547 case ASHIFTRT:
37548 case LSHIFTRT:
37549 case ROTATE:
37550 case ROTATERT:
37551 /* Handle mul_highpart. */
37552 if (outer_code == TRUNCATE
37553 && GET_CODE (XEXP (x, 0)) == MULT)
37555 if (mode == DImode)
37556 *total = rs6000_cost->muldi;
37557 else
37558 *total = rs6000_cost->mulsi;
37559 return true;
37561 else if (outer_code == AND)
37562 *total = 0;
37563 else
37564 *total = COSTS_N_INSNS (1);
37565 return false;
37567 case SIGN_EXTEND:
37568 case ZERO_EXTEND:
37569 if (GET_CODE (XEXP (x, 0)) == MEM)
37570 *total = 0;
37571 else
37572 *total = COSTS_N_INSNS (1);
37573 return false;
37575 case COMPARE:
37576 case NEG:
37577 case ABS:
37578 if (!FLOAT_MODE_P (mode))
37580 *total = COSTS_N_INSNS (1);
37581 return false;
37583 /* FALLTHRU */
37585 case FLOAT:
37586 case UNSIGNED_FLOAT:
37587 case FIX:
37588 case UNSIGNED_FIX:
37589 case FLOAT_TRUNCATE:
37590 *total = rs6000_cost->fp;
37591 return false;
37593 case FLOAT_EXTEND:
37594 if (mode == DFmode)
37595 *total = rs6000_cost->sfdf_convert;
37596 else
37597 *total = rs6000_cost->fp;
37598 return false;
37600 case UNSPEC:
37601 switch (XINT (x, 1))
37603 case UNSPEC_FRSP:
37604 *total = rs6000_cost->fp;
37605 return true;
37607 default:
37608 break;
37610 break;
37612 case CALL:
37613 case IF_THEN_ELSE:
37614 if (!speed)
37616 *total = COSTS_N_INSNS (1);
37617 return true;
37619 else if (FLOAT_MODE_P (mode)
37620 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
37622 *total = rs6000_cost->fp;
37623 return false;
37625 break;
37627 case NE:
37628 case EQ:
37629 case GTU:
37630 case LTU:
37631 /* Carry bit requires mode == Pmode.
37632 NEG or PLUS already counted so only add one. */
37633 if (mode == Pmode
37634 && (outer_code == NEG || outer_code == PLUS))
37636 *total = COSTS_N_INSNS (1);
37637 return true;
37639 if (outer_code == SET)
37641 if (XEXP (x, 1) == const0_rtx)
37643 if (TARGET_ISEL && !TARGET_MFCRF)
37644 *total = COSTS_N_INSNS (8);
37645 else
37646 *total = COSTS_N_INSNS (2);
37647 return true;
37649 else
37651 *total = COSTS_N_INSNS (3);
37652 return false;
37655 /* FALLTHRU */
37657 case GT:
37658 case LT:
37659 case UNORDERED:
37660 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
37662 if (TARGET_ISEL && !TARGET_MFCRF)
37663 *total = COSTS_N_INSNS (8);
37664 else
37665 *total = COSTS_N_INSNS (2);
37666 return true;
37668 /* CC COMPARE. */
37669 if (outer_code == COMPARE)
37671 *total = 0;
37672 return true;
37674 break;
37676 default:
37677 break;
37680 return false;
37683 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
37685 static bool
37686 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
37687 int opno, int *total, bool speed)
37689 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
37691 fprintf (stderr,
37692 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
37693 "opno = %d, total = %d, speed = %s, x:\n",
37694 ret ? "complete" : "scan inner",
37695 GET_MODE_NAME (mode),
37696 GET_RTX_NAME (outer_code),
37697 opno,
37698 *total,
37699 speed ? "true" : "false");
37701 debug_rtx (x);
37703 return ret;
37706 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
37708 static int
37709 rs6000_debug_address_cost (rtx x, machine_mode mode,
37710 addr_space_t as, bool speed)
37712 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
37714 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
37715 ret, speed ? "true" : "false");
37716 debug_rtx (x);
37718 return ret;
37722 /* A C expression returning the cost of moving data from a register of class
37723 CLASS1 to one of CLASS2. */
37725 static int
37726 rs6000_register_move_cost (machine_mode mode,
37727 reg_class_t from, reg_class_t to)
37729 int ret;
37731 if (TARGET_DEBUG_COST)
37732 dbg_cost_ctrl++;
37734 /* Moves from/to GENERAL_REGS. */
37735 if (reg_classes_intersect_p (to, GENERAL_REGS)
37736 || reg_classes_intersect_p (from, GENERAL_REGS))
37738 reg_class_t rclass = from;
37740 if (! reg_classes_intersect_p (to, GENERAL_REGS))
37741 rclass = to;
37743 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
37744 ret = (rs6000_memory_move_cost (mode, rclass, false)
37745 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
37747 /* It's more expensive to move CR_REGS than CR0_REGS because of the
37748 shift. */
37749 else if (rclass == CR_REGS)
37750 ret = 4;
37752 /* For those processors that have slow LR/CTR moves, make them more
37753 expensive than memory in order to bias spills to memory. */
37754 else if ((rs6000_cpu == PROCESSOR_POWER6
37755 || rs6000_cpu == PROCESSOR_POWER7
37756 || rs6000_cpu == PROCESSOR_POWER8
37757 || rs6000_cpu == PROCESSOR_POWER9)
37758 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
37759 ret = 6 * hard_regno_nregs[0][mode];
37761 else
37762 /* A move will cost one instruction per GPR moved. */
37763 ret = 2 * hard_regno_nregs[0][mode];
37766 /* If we have VSX, we can easily move between FPR or Altivec registers. */
37767 else if (VECTOR_MEM_VSX_P (mode)
37768 && reg_classes_intersect_p (to, VSX_REGS)
37769 && reg_classes_intersect_p (from, VSX_REGS))
37770 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
37772 /* Moving between two similar registers is just one instruction. */
37773 else if (reg_classes_intersect_p (to, from))
37774 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
37776 /* Everything else has to go through GENERAL_REGS. */
37777 else
37778 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
37779 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
37781 if (TARGET_DEBUG_COST)
37783 if (dbg_cost_ctrl == 1)
37784 fprintf (stderr,
37785 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
37786 ret, GET_MODE_NAME (mode), reg_class_names[from],
37787 reg_class_names[to]);
37788 dbg_cost_ctrl--;
37791 return ret;
37794 /* A C expression returning the cost of moving data of MODE from a register
37795 to or from memory. */
37797 static int
37798 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
37799 bool in ATTRIBUTE_UNUSED)
37801 int ret;
37803 if (TARGET_DEBUG_COST)
37804 dbg_cost_ctrl++;
37806 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
37807 ret = 4 * hard_regno_nregs[0][mode];
37808 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
37809 || reg_classes_intersect_p (rclass, VSX_REGS)))
37810 ret = 4 * hard_regno_nregs[32][mode];
37811 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
37812 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
37813 else
37814 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
37816 if (TARGET_DEBUG_COST)
37818 if (dbg_cost_ctrl == 1)
37819 fprintf (stderr,
37820 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
37821 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
37822 dbg_cost_ctrl--;
37825 return ret;
37828 /* Returns a code for a target-specific builtin that implements the
37829 reciprocal of the function, or NULL_TREE if not available. */
37831 static tree
37832 rs6000_builtin_reciprocal (tree fndecl)
37834 switch (DECL_FUNCTION_CODE (fndecl))
37836 case VSX_BUILTIN_XVSQRTDP:
37837 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
37838 return NULL_TREE;
37840 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
37842 case VSX_BUILTIN_XVSQRTSP:
37843 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
37844 return NULL_TREE;
37846 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
37848 default:
37849 return NULL_TREE;
37853 /* Load up a constant. If the mode is a vector mode, splat the value across
37854 all of the vector elements. */
37856 static rtx
37857 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
37859 rtx reg;
37861 if (mode == SFmode || mode == DFmode)
37863 rtx d = const_double_from_real_value (dconst, mode);
37864 reg = force_reg (mode, d);
37866 else if (mode == V4SFmode)
37868 rtx d = const_double_from_real_value (dconst, SFmode);
37869 rtvec v = gen_rtvec (4, d, d, d, d);
37870 reg = gen_reg_rtx (mode);
37871 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37873 else if (mode == V2DFmode)
37875 rtx d = const_double_from_real_value (dconst, DFmode);
37876 rtvec v = gen_rtvec (2, d, d);
37877 reg = gen_reg_rtx (mode);
37878 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37880 else
37881 gcc_unreachable ();
37883 return reg;
37886 /* Generate an FMA instruction. */
37888 static void
37889 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
37891 machine_mode mode = GET_MODE (target);
37892 rtx dst;
37894 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
37895 gcc_assert (dst != NULL);
37897 if (dst != target)
37898 emit_move_insn (target, dst);
37901 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
37903 static void
37904 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
37906 machine_mode mode = GET_MODE (dst);
37907 rtx r;
37909 /* This is a tad more complicated, since the fnma_optab is for
37910 a different expression: fma(-m1, m2, a), which is the same
37911 thing except in the case of signed zeros.
37913 Fortunately we know that if FMA is supported that FNMSUB is
37914 also supported in the ISA. Just expand it directly. */
37916 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
37918 r = gen_rtx_NEG (mode, a);
37919 r = gen_rtx_FMA (mode, m1, m2, r);
37920 r = gen_rtx_NEG (mode, r);
37921 emit_insn (gen_rtx_SET (dst, r));
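/* Added sketch of the identity used above:
      dst = -fma (m1, m2, -a) = -((m1 * m2) - a) = a - m1 * m2,
   which matches fma (-m1, m2, a) except for the sign of a zero
   result, e.g. when m1 * m2 == a exactly.  */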
37924 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
37925 add a reg_note saying that this was a division. Support both scalar and
37926 vector divide. Assumes no trapping math and finite arguments. */
37928 void
37929 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
37931 machine_mode mode = GET_MODE (dst);
37932 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
37933 int i;
37935 /* Low precision estimates guarantee 5 bits of accuracy. High
37936 precision estimates guarantee 14 bits of accuracy. SFmode
37937 requires 23 bits of accuracy. DFmode requires 52 bits of
37938 accuracy. Each pass at least doubles the accuracy, leading
37939 to the following. */
37940 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
37941 if (mode == DFmode || mode == V2DFmode)
37942 passes++;
37944 enum insn_code code = optab_handler (smul_optab, mode);
37945 insn_gen_fn gen_mul = GEN_FCN (code);
37947 gcc_assert (code != CODE_FOR_nothing);
37949 one = rs6000_load_constant_and_splat (mode, dconst1);
37951 /* x0 = 1./d estimate */
37952 x0 = gen_reg_rtx (mode);
37953 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
37954 UNSPEC_FRES)));
37956 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
37957 if (passes > 1) {
37959 /* e0 = 1. - d * x0 */
37960 e0 = gen_reg_rtx (mode);
37961 rs6000_emit_nmsub (e0, d, x0, one);
37963 /* x1 = x0 + e0 * x0 */
37964 x1 = gen_reg_rtx (mode);
37965 rs6000_emit_madd (x1, e0, x0, x0);
37967 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
37968 ++i, xprev = xnext, eprev = enext) {
37970 /* enext = eprev * eprev */
37971 enext = gen_reg_rtx (mode);
37972 emit_insn (gen_mul (enext, eprev, eprev));
37974 /* xnext = xprev + enext * xprev */
37975 xnext = gen_reg_rtx (mode);
37976 rs6000_emit_madd (xnext, enext, xprev, xprev);
37979 } else
37980 xprev = x0;
37982 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
37984 /* u = n * xprev */
37985 u = gen_reg_rtx (mode);
37986 emit_insn (gen_mul (u, n, xprev));
37988 /* v = n - (d * u) */
37989 v = gen_reg_rtx (mode);
37990 rs6000_emit_nmsub (v, d, u, n);
37992 /* dst = (v * xprev) + u */
37993 rs6000_emit_madd (dst, v, xprev, u);
37995 if (note_p)
37996 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
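/* Convergence sketch for the loop above (illustrative): writing the
   current estimate as x_i = (1 - e_i) / d gives
      e_i   = 1 - d * x_i           (rs6000_emit_nmsub)
      x_i+1 = x_i + e_i * x_i       (rs6000_emit_madd)
            = (1 - e_i * e_i) / d
   so the relative error is squared on each pass, matching the
   "at least doubles the accuracy" claim; the final pass folds in
   the numerator: u = n * x, v = n - d * u, dst = v * x + u.  */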
37999 /* Goldschmidt's Algorithm for single/double-precision floating point
38000 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
38002 void
38003 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
38005 machine_mode mode = GET_MODE (src);
38006 rtx e = gen_reg_rtx (mode);
38007 rtx g = gen_reg_rtx (mode);
38008 rtx h = gen_reg_rtx (mode);
38010 /* Low precision estimates guarantee 5 bits of accuracy. High
38011 precision estimates guarantee 14 bits of accuracy. SFmode
38012 requires 23 bits of accuracy. DFmode requires 52 bits of
38013 accuracy. Each pass at least doubles the accuracy, leading
38014 to the following. */
38015 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
38016 if (mode == DFmode || mode == V2DFmode)
38017 passes++;
38019 int i;
38020 rtx mhalf;
38021 enum insn_code code = optab_handler (smul_optab, mode);
38022 insn_gen_fn gen_mul = GEN_FCN (code);
38024 gcc_assert (code != CODE_FOR_nothing);
38026 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
38028 /* e = rsqrt estimate */
38029 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
38030 UNSPEC_RSQRT)));
38032 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
38033 if (!recip)
38035 rtx zero = force_reg (mode, CONST0_RTX (mode));
38037 if (mode == SFmode)
38039 rtx target = emit_conditional_move (e, GT, src, zero, mode,
38040 e, zero, mode, 0);
38041 if (target != e)
38042 emit_move_insn (e, target);
38044 else
38046 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
38047 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
38051 /* g = sqrt estimate. */
38052 emit_insn (gen_mul (g, e, src));
38053 /* h = 1/(2*sqrt) estimate. */
38054 emit_insn (gen_mul (h, e, mhalf));
38056 if (recip)
38058 if (passes == 1)
38060 rtx t = gen_reg_rtx (mode);
38061 rs6000_emit_nmsub (t, g, h, mhalf);
38062 /* Apply correction directly to 1/rsqrt estimate. */
38063 rs6000_emit_madd (dst, e, t, e);
38065 else
38067 for (i = 0; i < passes; i++)
38069 rtx t1 = gen_reg_rtx (mode);
38070 rtx g1 = gen_reg_rtx (mode);
38071 rtx h1 = gen_reg_rtx (mode);
38073 rs6000_emit_nmsub (t1, g, h, mhalf);
38074 rs6000_emit_madd (g1, g, t1, g);
38075 rs6000_emit_madd (h1, h, t1, h);
38077 g = g1;
38078 h = h1;
38080 /* Multiply by 2 for 1/rsqrt. */
38081 emit_insn (gen_add3_insn (dst, h, h));
38084 else
38086 rtx t = gen_reg_rtx (mode);
38087 rs6000_emit_nmsub (t, g, h, mhalf);
38088 rs6000_emit_madd (dst, g, t, g);
38091 return;
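/* Iteration sketch (illustrative): with g ~= sqrt(src) and
   h ~= 1/(2*sqrt(src)), each pass computes
      t  = 1/2 - g * h
      g' = g + g * t
      h' = h + h * t
   If g * h = (1 + e) / 2, then t = -e/2 and the residual error
   shrinks roughly quadratically per pass; the result is g for sqrt,
   or 2*h (emitted as h + h) for rsqrt.  */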
38094 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
38095 (Power7) targets. DST is the target, and SRC is the argument operand. */
38097 void
38098 rs6000_emit_popcount (rtx dst, rtx src)
38100 machine_mode mode = GET_MODE (dst);
38101 rtx tmp1, tmp2;
38103 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
38104 if (TARGET_POPCNTD)
38106 if (mode == SImode)
38107 emit_insn (gen_popcntdsi2 (dst, src));
38108 else
38109 emit_insn (gen_popcntddi2 (dst, src));
38110 return;
38113 tmp1 = gen_reg_rtx (mode);
38115 if (mode == SImode)
38117 emit_insn (gen_popcntbsi2 (tmp1, src));
38118 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
38119 NULL_RTX, 0);
38120 tmp2 = force_reg (SImode, tmp2);
38121 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
38123 else
38125 emit_insn (gen_popcntbdi2 (tmp1, src));
38126 tmp2 = expand_mult (DImode, tmp1,
38127 GEN_INT ((HOST_WIDE_INT)
38128 0x01010101 << 32 | 0x01010101),
38129 NULL_RTX, 0);
38130 tmp2 = force_reg (DImode, tmp2);
38131 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
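/* Worked example of the fallback above (illustrative): popcntb
   counts bits within each byte, and the multiply accumulates the
   byte counts into the most significant byte.  For SImode
   src = 0x01020304:
      popcntb      -> 0x01010201   (per-byte counts 1,1,2,1)
      * 0x01010101 -> top byte 1+1+2+1 = 5
      >> 24        -> dst = 5
   which is popcount (0x01020304).  */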
38136 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
38137 target, and SRC is the argument operand. */
38139 void
38140 rs6000_emit_parity (rtx dst, rtx src)
38142 machine_mode mode = GET_MODE (dst);
38143 rtx tmp;
38145 tmp = gen_reg_rtx (mode);
38147 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
38148 if (TARGET_CMPB)
38150 if (mode == SImode)
38152 emit_insn (gen_popcntbsi2 (tmp, src));
38153 emit_insn (gen_paritysi2_cmpb (dst, tmp));
38155 else
38157 emit_insn (gen_popcntbdi2 (tmp, src));
38158 emit_insn (gen_paritydi2_cmpb (dst, tmp));
38160 return;
38163 if (mode == SImode)
38165 /* Is mult+shift >= shift+xor+shift+xor? */
38166 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
38168 rtx tmp1, tmp2, tmp3, tmp4;
38170 tmp1 = gen_reg_rtx (SImode);
38171 emit_insn (gen_popcntbsi2 (tmp1, src));
38173 tmp2 = gen_reg_rtx (SImode);
38174 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
38175 tmp3 = gen_reg_rtx (SImode);
38176 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
38178 tmp4 = gen_reg_rtx (SImode);
38179 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
38180 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
38182 else
38183 rs6000_emit_popcount (tmp, src);
38184 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
38186 else
38188 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
38189 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
38191 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
38193 tmp1 = gen_reg_rtx (DImode);
38194 emit_insn (gen_popcntbdi2 (tmp1, src));
38196 tmp2 = gen_reg_rtx (DImode);
38197 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
38198 tmp3 = gen_reg_rtx (DImode);
38199 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
38201 tmp4 = gen_reg_rtx (DImode);
38202 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
38203 tmp5 = gen_reg_rtx (DImode);
38204 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
38206 tmp6 = gen_reg_rtx (DImode);
38207 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
38208 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
38210 else
38211 rs6000_emit_popcount (tmp, src);
38212 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
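/* Added note on the shift/xor fallback above: parity is the low bit
   of the sum of the per-byte popcounts, and XOR preserves sums
   modulo 2, so folding the halves together (>> 32, >> 16, >> 8 for
   DImode) leaves that bit intact; the final AND with 1 extracts it
   without needing the multiply.  */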
38216 /* Expand an Altivec constant permutation for little endian mode.
38217 There are two issues: First, the two input operands must be
38218 swapped so that together they form a double-wide array in LE
38219 order. Second, the vperm instruction has surprising behavior
38220 in LE mode: it interprets the elements of the source vectors
38221 in BE mode ("left to right") and interprets the elements of
38222 the destination vector in LE mode ("right to left"). To
38223 correct for this, we must subtract each element of the permute
38224 control vector from 31.
38226 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
38227 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
38228 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
38229 serve as the permute control vector. Then, in BE mode,
38231 vperm 9,10,11,12
38233 places the desired result in vr9. However, in LE mode the
38234 vector contents will be
38236 vr10 = 00000003 00000002 00000001 00000000
38237 vr11 = 00000007 00000006 00000005 00000004
38239 The result of the vperm using the same permute control vector is
38241 vr9 = 05000000 07000000 01000000 03000000
38243 That is, the leftmost 4 bytes of vr10 are interpreted as the
38244 source for the rightmost 4 bytes of vr9, and so on.
38246 If we change the permute control vector to
38248 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
38250 and issue
38252 vperm 9,11,10,12
38254 we get the desired
38256 vr9 = 00000006 00000004 00000002 00000000. */
38258 void
38259 altivec_expand_vec_perm_const_le (rtx operands[4])
38261 unsigned int i;
38262 rtx perm[16];
38263 rtx constv, unspec;
38264 rtx target = operands[0];
38265 rtx op0 = operands[1];
38266 rtx op1 = operands[2];
38267 rtx sel = operands[3];
38269 /* Unpack and adjust the constant selector. */
38270 for (i = 0; i < 16; ++i)
38272 rtx e = XVECEXP (sel, 0, i);
38273 unsigned int elt = 31 - (INTVAL (e) & 31);
38274 perm[i] = GEN_INT (elt);
38277 /* Expand to a permute, swapping the inputs and using the
38278 adjusted selector. */
38279 if (!REG_P (op0))
38280 op0 = force_reg (V16QImode, op0);
38281 if (!REG_P (op1))
38282 op1 = force_reg (V16QImode, op1);
38284 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
38285 constv = force_reg (V16QImode, constv);
38286 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
38287 UNSPEC_VPERM);
38288 if (!REG_P (target))
38290 rtx tmp = gen_reg_rtx (V16QImode);
38291 emit_move_insn (tmp, unspec);
38292 unspec = tmp;
38295 emit_move_insn (target, unspec);
38298 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
38299 permute control vector. But here it's not a constant, so we must
38300 generate a vector NAND or NOR to do the adjustment. */
38302 void
38303 altivec_expand_vec_perm_le (rtx operands[4])
38305 rtx notx, iorx, unspec;
38306 rtx target = operands[0];
38307 rtx op0 = operands[1];
38308 rtx op1 = operands[2];
38309 rtx sel = operands[3];
38310 rtx tmp = target;
38311 rtx norreg = gen_reg_rtx (V16QImode);
38312 machine_mode mode = GET_MODE (target);
38314 /* Get everything in regs so the pattern matches. */
38315 if (!REG_P (op0))
38316 op0 = force_reg (mode, op0);
38317 if (!REG_P (op1))
38318 op1 = force_reg (mode, op1);
38319 if (!REG_P (sel))
38320 sel = force_reg (V16QImode, sel);
38321 if (!REG_P (target))
38322 tmp = gen_reg_rtx (mode);
38324 if (TARGET_P9_VECTOR)
38326 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
38327 UNSPEC_VPERMR);
38329 else
38331 /* Invert the selector with a VNAND if available, else a VNOR.
38332 The VNAND is preferred for future fusion opportunities. */
38333 notx = gen_rtx_NOT (V16QImode, sel);
38334 iorx = (TARGET_P8_VECTOR
38335 ? gen_rtx_IOR (V16QImode, notx, notx)
38336 : gen_rtx_AND (V16QImode, notx, notx));
38337 emit_insn (gen_rtx_SET (norreg, iorx));
38339 /* Permute with operands reversed and adjusted selector. */
38340 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
38341 UNSPEC_VPERM);
38344 /* Copy into target, possibly by way of a register. */
38345 if (!REG_P (target))
38347 emit_move_insn (tmp, unspec);
38348 unspec = tmp;
38351 emit_move_insn (target, unspec);
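/* Added note: vperm reads only the low 5 bits of each selector byte,
   so the VNAND/VNOR above implements the "subtract from 31"
   adjustment without arithmetic, since per byte
      (~sel) & 31 == (255 - sel) & 31 == 31 - (sel & 31).
   With TARGET_P9_VECTOR, vpermr performs the reversed-order permute
   directly and no selector adjustment is needed.  */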
38354 /* Expand an Altivec constant permutation. Return true if we match
38355 an efficient implementation; false to fall back to VPERM. */
38357 bool
38358 altivec_expand_vec_perm_const (rtx operands[4])
38360 struct altivec_perm_insn {
38361 HOST_WIDE_INT mask;
38362 enum insn_code impl;
38363 unsigned char perm[16];
38365 static const struct altivec_perm_insn patterns[] = {
38366 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
38367 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
38368 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
38369 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
38370 { OPTION_MASK_ALTIVEC,
38371 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
38372 : CODE_FOR_altivec_vmrglb_direct),
38373 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
38374 { OPTION_MASK_ALTIVEC,
38375 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
38376 : CODE_FOR_altivec_vmrglh_direct),
38377 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
38378 { OPTION_MASK_ALTIVEC,
38379 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
38380 : CODE_FOR_altivec_vmrglw_direct),
38381 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
38382 { OPTION_MASK_ALTIVEC,
38383 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
38384 : CODE_FOR_altivec_vmrghb_direct),
38385 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
38386 { OPTION_MASK_ALTIVEC,
38387 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
38388 : CODE_FOR_altivec_vmrghh_direct),
38389 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
38390 { OPTION_MASK_ALTIVEC,
38391 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
38392 : CODE_FOR_altivec_vmrghw_direct),
38393 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
38394 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
38395 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
38396 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
38397 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
38400 unsigned int i, j, elt, which;
38401 unsigned char perm[16];
38402 rtx target, op0, op1, sel, x;
38403 bool one_vec;
38405 target = operands[0];
38406 op0 = operands[1];
38407 op1 = operands[2];
38408 sel = operands[3];
38410 /* Unpack the constant selector. */
38411 for (i = which = 0; i < 16; ++i)
38413 rtx e = XVECEXP (sel, 0, i);
38414 elt = INTVAL (e) & 31;
38415 which |= (elt < 16 ? 1 : 2);
38416 perm[i] = elt;
38419 /* Simplify the constant selector based on operands. */
38420 switch (which)
38422 default:
38423 gcc_unreachable ();
38425 case 3:
38426 one_vec = false;
38427 if (!rtx_equal_p (op0, op1))
38428 break;
38429 /* FALLTHRU */
38431 case 2:
38432 for (i = 0; i < 16; ++i)
38433 perm[i] &= 15;
38434 op0 = op1;
38435 one_vec = true;
38436 break;
38438 case 1:
38439 op1 = op0;
38440 one_vec = true;
38441 break;
38444 /* Look for splat patterns. */
38445 if (one_vec)
38447 elt = perm[0];
38449 for (i = 0; i < 16; ++i)
38450 if (perm[i] != elt)
38451 break;
38452 if (i == 16)
38454 if (!BYTES_BIG_ENDIAN)
38455 elt = 15 - elt;
38456 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
38457 return true;
38460 if (elt % 2 == 0)
38462 for (i = 0; i < 16; i += 2)
38463 if (perm[i] != elt || perm[i + 1] != elt + 1)
38464 break;
38465 if (i == 16)
38467 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
38468 x = gen_reg_rtx (V8HImode);
38469 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
38470 GEN_INT (field)));
38471 emit_move_insn (target, gen_lowpart (V16QImode, x));
38472 return true;
38476 if (elt % 4 == 0)
38478 for (i = 0; i < 16; i += 4)
38479 if (perm[i] != elt
38480 || perm[i + 1] != elt + 1
38481 || perm[i + 2] != elt + 2
38482 || perm[i + 3] != elt + 3)
38483 break;
38484 if (i == 16)
38486 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
38487 x = gen_reg_rtx (V4SImode);
38488 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
38489 GEN_INT (field)));
38490 emit_move_insn (target, gen_lowpart (V16QImode, x));
38491 return true;
38496 /* Look for merge and pack patterns. */
38497 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
38499 bool swapped;
38501 if ((patterns[j].mask & rs6000_isa_flags) == 0)
38502 continue;
38504 elt = patterns[j].perm[0];
38505 if (perm[0] == elt)
38506 swapped = false;
38507 else if (perm[0] == elt + 16)
38508 swapped = true;
38509 else
38510 continue;
38511 for (i = 1; i < 16; ++i)
38513 elt = patterns[j].perm[i];
38514 if (swapped)
38515 elt = (elt >= 16 ? elt - 16 : elt + 16);
38516 else if (one_vec && elt >= 16)
38517 elt -= 16;
38518 if (perm[i] != elt)
38519 break;
38521 if (i == 16)
38523 enum insn_code icode = patterns[j].impl;
38524 machine_mode omode = insn_data[icode].operand[0].mode;
38525 machine_mode imode = insn_data[icode].operand[1].mode;
38527 /* For little-endian, don't use vpkuwum and vpkuhum if the
38528 underlying vector type is not V4SI or V8HI, respectively.
38529 For example, using vpkuwum with a V8HI picks up the even
38530 halfwords (BE numbering) when the even halfwords (LE
38531 numbering) are what we need. */
38532 if (!BYTES_BIG_ENDIAN
38533 && icode == CODE_FOR_altivec_vpkuwum_direct
38534 && ((GET_CODE (op0) == REG
38535 && GET_MODE (op0) != V4SImode)
38536 || (GET_CODE (op0) == SUBREG
38537 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
38538 continue;
38539 if (!BYTES_BIG_ENDIAN
38540 && icode == CODE_FOR_altivec_vpkuhum_direct
38541 && ((GET_CODE (op0) == REG
38542 && GET_MODE (op0) != V8HImode)
38543 || (GET_CODE (op0) == SUBREG
38544 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
38545 continue;
38547 /* For little-endian, the two input operands must be swapped
38548 (or swapped back) to ensure proper right-to-left numbering
38549 from 0 to 2N-1. */
38550 if (swapped ^ !BYTES_BIG_ENDIAN)
38551 std::swap (op0, op1);
38552 if (imode != V16QImode)
38554 op0 = gen_lowpart (imode, op0);
38555 op1 = gen_lowpart (imode, op1);
38557 if (omode == V16QImode)
38558 x = target;
38559 else
38560 x = gen_reg_rtx (omode);
38561 emit_insn (GEN_FCN (icode) (x, op0, op1));
38562 if (omode != V16QImode)
38563 emit_move_insn (target, gen_lowpart (V16QImode, x));
38564 return true;
38568 if (!BYTES_BIG_ENDIAN)
38570 altivec_expand_vec_perm_const_le (operands);
38571 return true;
38574 return false;
38577 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
38578 Return true if we match an efficient implementation. */
38580 static bool
38581 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
38582 unsigned char perm0, unsigned char perm1)
38584 rtx x;
38586 /* If both selectors come from the same operand, fold to single op. */
38587 if ((perm0 & 2) == (perm1 & 2))
38589 if (perm0 & 2)
38590 op0 = op1;
38591 else
38592 op1 = op0;
38594 /* If both operands are equal, fold to simpler permutation. */
38595 if (rtx_equal_p (op0, op1))
38597 perm0 = perm0 & 1;
38598 perm1 = (perm1 & 1) + 2;
38600 /* If the first selector comes from the second operand, swap. */
38601 else if (perm0 & 2)
38603 if (perm1 & 2)
38604 return false;
38605 perm0 -= 2;
38606 perm1 += 2;
38607 std::swap (op0, op1);
38609 /* If the second selector does not come from the second operand, fail. */
38610 else if ((perm1 & 2) == 0)
38611 return false;
38613 /* Success! */
38614 if (target != NULL)
38616 machine_mode vmode, dmode;
38617 rtvec v;
38619 vmode = GET_MODE (target);
38620 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
38621 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
38622 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
38623 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
38624 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
38625 emit_insn (gen_rtx_SET (target, x));
38627 return true;
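/* Worked example for the two-element expander above (illustrative):
   PERM0/PERM1 are 0..3, bit 1 selecting the operand and bit 0 the
   element.  A V2DF selector {1,2} takes op0[1] then op1[0]; neither
   fold applies, so we emit a VEC_SELECT of elements {1,2} from the
   four-element VEC_CONCAT of op0 and op1.  */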
38630 bool
38631 rs6000_expand_vec_perm_const (rtx operands[4])
38633 rtx target, op0, op1, sel;
38634 unsigned char perm0, perm1;
38636 target = operands[0];
38637 op0 = operands[1];
38638 op1 = operands[2];
38639 sel = operands[3];
38641 /* Unpack the constant selector. */
38642 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
38643 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
38645 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
38648 /* Test whether a constant permutation is supported. */
38650 static bool
38651 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
38652 const unsigned char *sel)
38654 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
38655 if (TARGET_ALTIVEC)
38656 return true;
38658 /* Check for ps_merge* or evmerge* insns. */
38659 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
38660 || (TARGET_SPE && vmode == V2SImode))
38662 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
38663 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
38664 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
38667 return false;
38670 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
38672 static void
38673 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
38674 machine_mode vmode, unsigned nelt, rtx perm[])
38676 machine_mode imode;
38677 rtx x;
38679 imode = vmode;
38680 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
38682 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
38683 imode = mode_for_vector (imode, nelt);
38686 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
38687 x = expand_vec_perm (vmode, op0, op1, x, target);
38688 if (x != target)
38689 emit_move_insn (target, x);
38692 /* Expand an extract even operation. */
38694 void
38695 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
38697 machine_mode vmode = GET_MODE (target);
38698 unsigned i, nelt = GET_MODE_NUNITS (vmode);
38699 rtx perm[16];
38701 for (i = 0; i < nelt; i++)
38702 perm[i] = GEN_INT (i * 2);
38704 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
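/* Example (illustrative): for a V4SI target the loop above builds the
   selector {0,2,4,6}, i.e. the even elements of op0 followed by the
   even elements of op1 in the concatenated op0||op1 numbering.  */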
38707 /* Expand a vector interleave operation. */
38709 void
38710 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
38712 machine_mode vmode = GET_MODE (target);
38713 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
38714 rtx perm[16];
38716 high = (highp ? 0 : nelt / 2);
38717 for (i = 0; i < nelt / 2; i++)
38719 perm[i * 2] = GEN_INT (i + high);
38720 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
38723 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
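/* Example (illustrative): for V4SI with HIGHP set, HIGH is 0 and the
   selector is {0,4,1,5} -- the high halves of op0 and op1 interleaved;
   with HIGHP clear, HIGH is 2 and the selector is {2,6,3,7}.  */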
38726 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
38727 void
38728 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
38730 HOST_WIDE_INT hwi_scale (scale);
38731 REAL_VALUE_TYPE r_pow;
38732 rtvec v = rtvec_alloc (2);
38733 rtx elt;
38734 rtx scale_vec = gen_reg_rtx (V2DFmode);
38735 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
38736 elt = const_double_from_real_value (r_pow, DFmode);
38737 RTVEC_ELT (v, 0) = elt;
38738 RTVEC_ELT (v, 1) = elt;
38739 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
38740 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
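/* Usage sketch (illustrative): rs6000_scale_v2df (dst, src, 3) splats
   2.0**3 == 8.0 into a V2DF temporary and multiplies, scaling both
   lanes of SRC by 8.0 into DST.  */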
38743 /* Return an RTX representing where to find the function value of a
38744 function returning MODE. */
38745 static rtx
38746 rs6000_complex_function_value (machine_mode mode)
38748 unsigned int regno;
38749 rtx r1, r2;
38750 machine_mode inner = GET_MODE_INNER (mode);
38751 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
38753 if (TARGET_FLOAT128_TYPE
38754 && (mode == KCmode
38755 || (mode == TCmode && TARGET_IEEEQUAD)))
38756 regno = ALTIVEC_ARG_RETURN;
38758 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38759 regno = FP_ARG_RETURN;
38761 else
38763 regno = GP_ARG_RETURN;
38765 /* 32-bit is OK since it'll go in r3/r4. */
38766 if (TARGET_32BIT && inner_bytes >= 4)
38767 return gen_rtx_REG (mode, regno);
38770 if (inner_bytes >= 8)
38771 return gen_rtx_REG (mode, regno);
38773 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
38774 const0_rtx);
38775 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
38776 GEN_INT (inner_bytes));
38777 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
38780 /* Return an rtx describing a return value of MODE as a PARALLEL
38781 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
38782 stride REG_STRIDE. */
38784 static rtx
38785 rs6000_parallel_return (machine_mode mode,
38786 int n_elts, machine_mode elt_mode,
38787 unsigned int regno, unsigned int reg_stride)
38789 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
38791 int i;
38792 for (i = 0; i < n_elts; i++)
38794 rtx r = gen_rtx_REG (elt_mode, regno);
38795 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
38796 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
38797 regno += reg_stride;
38800 return par;
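/* Example (illustrative): rs6000_parallel_return (DImode, 2, SImode,
   GP_ARG_RETURN, 1) describes a 64-bit value split across r3 and r4,
   with byte offsets 0 and 4 recorded in the EXPR_LIST entries.  */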
38803 /* Target hook for TARGET_FUNCTION_VALUE.
38805 On the SPE, both FPs and vectors are returned in r3.
38807 On RS/6000 an integer value is in r3 and a floating-point value is in
38808 fp1, unless -msoft-float. */
38810 static rtx
38811 rs6000_function_value (const_tree valtype,
38812 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
38813 bool outgoing ATTRIBUTE_UNUSED)
38815 machine_mode mode;
38816 unsigned int regno;
38817 machine_mode elt_mode;
38818 int n_elts;
38820 /* Special handling for structs in darwin64. */
38821 if (TARGET_MACHO
38822 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
38824 CUMULATIVE_ARGS valcum;
38825 rtx valret;
38827 valcum.words = 0;
38828 valcum.fregno = FP_ARG_MIN_REG;
38829 valcum.vregno = ALTIVEC_ARG_MIN_REG;
38830 /* Do a trial code generation as if this were going to be passed as
38831 an argument; if any part goes in memory, we return NULL. */
38832 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
38833 if (valret)
38834 return valret;
38835 /* Otherwise fall through to standard ABI rules. */
38838 mode = TYPE_MODE (valtype);
38840 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
38841 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
38843 int first_reg, n_regs;
38845 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
38847 /* _Decimal128 must use even/odd register pairs. */
38848 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38849 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
38851 else
38853 first_reg = ALTIVEC_ARG_RETURN;
38854 n_regs = 1;
38857 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
38860 /* Some return value types need to be split under the 32-bit ABI with -mpowerpc64. */
38861 if (TARGET_32BIT && TARGET_POWERPC64)
38862 switch (mode)
38864 default:
38865 break;
38866 case DImode:
38867 case SCmode:
38868 case DCmode:
38869 case TCmode:
38870 int count = GET_MODE_SIZE (mode) / 4;
38871 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
38874 if ((INTEGRAL_TYPE_P (valtype)
38875 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
38876 || POINTER_TYPE_P (valtype))
38877 mode = TARGET_32BIT ? SImode : DImode;
38879 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38880 /* _Decimal128 must use an even/odd register pair. */
38881 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38882 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
38883 && !FLOAT128_VECTOR_P (mode)
38884 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
38885 regno = FP_ARG_RETURN;
38886 else if (TREE_CODE (valtype) == COMPLEX_TYPE
38887 && targetm.calls.split_complex_arg)
38888 return rs6000_complex_function_value (mode);
38889 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38890 return register is used in both cases, and we won't see V2DImode/V2DFmode
38891 for pure altivec, combine the two cases. */
38892 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
38893 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
38894 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38895 regno = ALTIVEC_ARG_RETURN;
38896 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38897 && (mode == DFmode || mode == DCmode
38898 || FLOAT128_IBM_P (mode) || mode == TCmode))
38899 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38900 else
38901 regno = GP_ARG_RETURN;
38903 return gen_rtx_REG (mode, regno);
38906 /* Define how to find the value returned by a library function
38907 assuming the value has mode MODE. */
38908 rtx
38909 rs6000_libcall_value (machine_mode mode)
38911 unsigned int regno;
38913 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
38914 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
38915 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
38917 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38918 /* _Decimal128 must use an even/odd register pair. */
38919 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38920 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
38921 && TARGET_HARD_FLOAT && TARGET_FPRS
38922 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
38923 regno = FP_ARG_RETURN;
38924 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38925 return register is used in both cases, and we won't see V2DImode/V2DFmode
38926 for pure altivec, combine the two cases. */
38927 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
38928 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
38929 regno = ALTIVEC_ARG_RETURN;
38930 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
38931 return rs6000_complex_function_value (mode);
38932 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38933 && (mode == DFmode || mode == DCmode
38934 || FLOAT128_IBM_P (mode) || mode == TCmode))
38935 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38936 else
38937 regno = GP_ARG_RETURN;
38939 return gen_rtx_REG (mode, regno);
38943 /* Return true if we use LRA instead of reload pass. */
38944 static bool
38945 rs6000_lra_p (void)
38947 return TARGET_LRA;
38950 /* Compute register pressure classes. We implement the target hook to avoid
38951 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
38952 lead to incorrect estimates of the number of available registers and therefore
38953 to increased register pressure and spilling. */
38954 static int
38955 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
38957 int n;
38959 n = 0;
38960 pressure_classes[n++] = GENERAL_REGS;
38961 if (TARGET_VSX)
38962 pressure_classes[n++] = VSX_REGS;
38963 else
38965 if (TARGET_ALTIVEC)
38966 pressure_classes[n++] = ALTIVEC_REGS;
38967 if (TARGET_HARD_FLOAT && TARGET_FPRS)
38968 pressure_classes[n++] = FLOAT_REGS;
38970 pressure_classes[n++] = CR_REGS;
38971 pressure_classes[n++] = SPECIAL_REGS;
38973 return n;
38976 /* Given FROM and TO register numbers, say whether this elimination is allowed.
38977 Frame pointer elimination is automatically handled.
38979 For the RS/6000, if frame pointer elimination is being done, we would like
38980 to convert ap into fp, not sp.
38982 We need r30 if -mminimal-toc was specified, and there are constant pool
38983 references. */
38985 static bool
38986 rs6000_can_eliminate (const int from, const int to)
38988 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
38989 ? ! frame_pointer_needed
38990 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
38991 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
38992 || constant_pool_empty_p ()
38993 : true);
38996 /* Define the offset between two registers, FROM to be eliminated and its
38997 replacement TO, at the start of a routine. */
38998 HOST_WIDE_INT
38999 rs6000_initial_elimination_offset (int from, int to)
39001 rs6000_stack_t *info = rs6000_stack_info ();
39002 HOST_WIDE_INT offset;
39004 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
39005 offset = info->push_p ? 0 : -info->total_size;
39006 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
39008 offset = info->push_p ? 0 : -info->total_size;
39009 if (FRAME_GROWS_DOWNWARD)
39010 offset += info->fixed_size + info->vars_size + info->parm_size;
39012 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
39013 offset = FRAME_GROWS_DOWNWARD
39014 ? info->fixed_size + info->vars_size + info->parm_size
39015 : 0;
39016 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
39017 offset = info->total_size;
39018 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
39019 offset = info->push_p ? info->total_size : 0;
39020 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
39021 offset = 0;
39022 else
39023 gcc_unreachable ();
39025 return offset;
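/* Example (illustrative): eliminating the argument pointer into the
   stack pointer yields total_size when the frame was pushed, so the
   incoming arguments are addressed as sp + total_size.  */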
39028 static rtx
39029 rs6000_dwarf_register_span (rtx reg)
39031 rtx parts[8];
39032 int i, words;
39033 unsigned regno = REGNO (reg);
39034 machine_mode mode = GET_MODE (reg);
39036 if (TARGET_SPE
39037 && regno < 32
39038 && (SPE_VECTOR_MODE (GET_MODE (reg))
39039 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
39040 && mode != SFmode && mode != SDmode && mode != SCmode)))
39041 ;
39042 else
39043 return NULL_RTX;
39045 regno = REGNO (reg);
39047 /* The duality of the SPE register size wreaks all kinds of havoc.
39048 This is a way of distinguishing r0 in 32-bits from r0 in
39049 64-bits. */
39050 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
39051 gcc_assert (words <= 4);
39052 for (i = 0; i < words; i++, regno++)
39054 if (BYTES_BIG_ENDIAN)
39056 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
39057 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
39059 else
39061 parts[2 * i] = gen_rtx_REG (SImode, regno);
39062 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
39066 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
39069 /* Fill in sizes for SPE register high parts in table used by unwinder. */
39071 static void
39072 rs6000_init_dwarf_reg_sizes_extra (tree address)
39074 if (TARGET_SPE)
39076 int i;
39077 machine_mode mode = TYPE_MODE (char_type_node);
39078 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
39079 rtx mem = gen_rtx_MEM (BLKmode, addr);
39080 rtx value = gen_int_mode (4, mode);
39082 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
39084 int column = DWARF_REG_TO_UNWIND_COLUMN
39085 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
39086 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
39088 emit_move_insn (adjust_address (mem, mode, offset), value);
39092 if (TARGET_MACHO && ! TARGET_ALTIVEC)
39094 int i;
39095 machine_mode mode = TYPE_MODE (char_type_node);
39096 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
39097 rtx mem = gen_rtx_MEM (BLKmode, addr);
39098 rtx value = gen_int_mode (16, mode);
39100 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
39101 The unwinder still needs to know the size of Altivec registers. */
39103 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
39105 int column = DWARF_REG_TO_UNWIND_COLUMN
39106 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
39107 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
39109 emit_move_insn (adjust_address (mem, mode, offset), value);
39114 /* Map internal gcc register numbers to debug format register numbers.
39115 FORMAT specifies the type of debug register number to use:
39116 0 -- debug information, except for frame-related sections
39117 1 -- DWARF .debug_frame section
39118 2 -- DWARF .eh_frame section */
39120 unsigned int
39121 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
39123 /* We never use the GCC internal number for SPE high registers.
39124 Those are mapped to the 1200..1231 range for all debug formats. */
39125 if (SPE_HIGH_REGNO_P (regno))
39126 return regno - FIRST_SPE_HIGH_REGNO + 1200;
39128 /* Except for the above, we use the internal number for non-DWARF
39129 debug information, and also for .eh_frame. */
39130 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
39131 return regno;
39133 /* On some platforms, we use the standard DWARF register
39134 numbering for .debug_info and .debug_frame. */
39135 #ifdef RS6000_USE_DWARF_NUMBERING
39136 if (regno <= 63)
39137 return regno;
39138 if (regno == LR_REGNO)
39139 return 108;
39140 if (regno == CTR_REGNO)
39141 return 109;
39142 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
39143 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
39144 The actual code emitted saves the whole of CR, so we map CR2_REGNO
39145 to the DWARF reg for CR. */
39146 if (format == 1 && regno == CR2_REGNO)
39147 return 64;
39148 if (CR_REGNO_P (regno))
39149 return regno - CR0_REGNO + 86;
39150 if (regno == CA_REGNO)
39151 return 101; /* XER */
39152 if (ALTIVEC_REGNO_P (regno))
39153 return regno - FIRST_ALTIVEC_REGNO + 1124;
39154 if (regno == VRSAVE_REGNO)
39155 return 356;
39156 if (regno == VSCR_REGNO)
39157 return 67;
39158 if (regno == SPE_ACC_REGNO)
39159 return 99;
39160 if (regno == SPEFSCR_REGNO)
39161 return 612;
39162 #endif
39163 return regno;
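/* Example (illustrative): with RS6000_USE_DWARF_NUMBERING, GPRs and
   FPRs map to themselves (0..63), the link register to 108, the CR
   fields to 86..93, and AltiVec v0 to 1124; in the remaining cases the
   GCC-internal number is emitted unchanged.  */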
39166 /* Target hook for eh_return_filter_mode. */
39167 static machine_mode
39168 rs6000_eh_return_filter_mode (void)
39170 return TARGET_32BIT ? SImode : word_mode;
39173 /* Target hook for scalar_mode_supported_p. */
39174 static bool
39175 rs6000_scalar_mode_supported_p (machine_mode mode)
39177 /* -m32 does not support TImode. This is the default, from
39178 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
39179 same ABI as for -m32. But default_scalar_mode_supported_p allows
39180 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
39181 for -mpowerpc64. */
39182 if (TARGET_32BIT && mode == TImode)
39183 return false;
39185 if (DECIMAL_FLOAT_MODE_P (mode))
39186 return default_decimal_float_supported_p ();
39187 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
39188 return true;
39189 else
39190 return default_scalar_mode_supported_p (mode);
39193 /* Target hook for vector_mode_supported_p. */
39194 static bool
39195 rs6000_vector_mode_supported_p (machine_mode mode)
39198 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
39199 return true;
39201 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
39202 return true;
39204 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
39205 128-bit, the compiler might try to widen IEEE 128-bit to IBM
39206 double-double. */
39207 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
39208 return true;
39210 else
39211 return false;
39214 /* Target hook for floatn_mode. */
39215 static machine_mode
39216 rs6000_floatn_mode (int n, bool extended)
39218 if (extended)
39220 switch (n)
39222 case 32:
39223 return DFmode;
39225 case 64:
39226 if (TARGET_FLOAT128_KEYWORD)
39227 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39228 else
39229 return VOIDmode;
39231 case 128:
39232 return VOIDmode;
39234 default:
39235 /* Those are the only valid _FloatNx types. */
39236 gcc_unreachable ();
39239 else
39241 switch (n)
39243 case 32:
39244 return SFmode;
39246 case 64:
39247 return DFmode;
39249 case 128:
39250 if (TARGET_FLOAT128_KEYWORD)
39251 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39252 else
39253 return VOIDmode;
39255 default:
39256 return VOIDmode;
39262 /* Target hook for c_mode_for_suffix. */
39263 static machine_mode
39264 rs6000_c_mode_for_suffix (char suffix)
39266 if (TARGET_FLOAT128_TYPE)
39268 if (suffix == 'q' || suffix == 'Q')
39269 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39271 /* At the moment, we are not defining a suffix for IBM extended double.
39272 If/when the default for -mabi=ieeelongdouble is changed, and we want
39273 to support __ibm128 constants in legacy library code, we may need to
39274 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
39275 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
39276 __float80 constants. */
39279 return VOIDmode;
39282 /* Target hook for invalid_arg_for_unprototyped_fn. */
39283 static const char *
39284 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
39286 return (!rs6000_darwin64_abi
39287 && typelist == 0
39288 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
39289 && (funcdecl == NULL_TREE
39290 || (TREE_CODE (funcdecl) == FUNCTION_DECL
39291 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
39292 ? N_("AltiVec argument passed to unprototyped function")
39293 : NULL;
39296 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
39297 setup by using __stack_chk_fail_local hidden function instead of
39298 calling __stack_chk_fail directly. Otherwise it is better to call
39299 __stack_chk_fail directly. */
39301 static tree ATTRIBUTE_UNUSED
39302 rs6000_stack_protect_fail (void)
39304 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
39305 ? default_hidden_stack_protect_fail ()
39306 : default_external_stack_protect_fail ();
39309 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
39311 #if TARGET_ELF
39312 static unsigned HOST_WIDE_INT
39313 rs6000_asan_shadow_offset (void)
39315 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
39317 #endif
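/* Sketch (illustrative; the standard ASan mapping): the runtime
   computes shadow = (addr >> 3) + offset, so the values above supply
   the constant term of that affine mapping -- 1<<41 for 64-bit and
   1<<29 for 32-bit code.  */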
39319 /* Mask options that we want to support inside of attribute((target)) and
39320 #pragma GCC target operations. Note, we do not include things like
39321 64/32-bit, endianness, hard/soft floating point, etc. that would have
39322 different calling sequences. */
39324 struct rs6000_opt_mask {
39325 const char *name; /* option name */
39326 HOST_WIDE_INT mask; /* mask to set */
39327 bool invert; /* invert sense of mask */
39328 bool valid_target; /* option is a target option */
39331 static struct rs6000_opt_mask const rs6000_opt_masks[] =
39333 { "altivec", OPTION_MASK_ALTIVEC, false, true },
39334 { "cmpb", OPTION_MASK_CMPB, false, true },
39335 { "crypto", OPTION_MASK_CRYPTO, false, true },
39336 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
39337 { "dlmzb", OPTION_MASK_DLMZB, false, true },
39338 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
39339 false, true },
39340 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
39341 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
39342 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
39343 { "fprnd", OPTION_MASK_FPRND, false, true },
39344 { "hard-dfp", OPTION_MASK_DFP, false, true },
39345 { "htm", OPTION_MASK_HTM, false, true },
39346 { "isel", OPTION_MASK_ISEL, false, true },
39347 { "mfcrf", OPTION_MASK_MFCRF, false, true },
39348 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
39349 { "modulo", OPTION_MASK_MODULO, false, true },
39350 { "mulhw", OPTION_MASK_MULHW, false, true },
39351 { "multiple", OPTION_MASK_MULTIPLE, false, true },
39352 { "popcntb", OPTION_MASK_POPCNTB, false, true },
39353 { "popcntd", OPTION_MASK_POPCNTD, false, true },
39354 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
39355 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
39356 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
39357 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
39358 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
39359 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
39360 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
39361 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
39362 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
39363 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
39364 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
39365 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
39366 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
39367 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
39368 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
39369 { "string", OPTION_MASK_STRING, false, true },
39370 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
39371 { "update", OPTION_MASK_NO_UPDATE, true , true },
39372 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
39373 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
39374 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
39375 { "vsx", OPTION_MASK_VSX, false, true },
39376 { "vsx-small-integer", OPTION_MASK_VSX_SMALL_INTEGER, false, true },
39377 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
39378 #ifdef OPTION_MASK_64BIT
39379 #if TARGET_AIX_OS
39380 { "aix64", OPTION_MASK_64BIT, false, false },
39381 { "aix32", OPTION_MASK_64BIT, true, false },
39382 #else
39383 { "64", OPTION_MASK_64BIT, false, false },
39384 { "32", OPTION_MASK_64BIT, true, false },
39385 #endif
39386 #endif
39387 #ifdef OPTION_MASK_EABI
39388 { "eabi", OPTION_MASK_EABI, false, false },
39389 #endif
39390 #ifdef OPTION_MASK_LITTLE_ENDIAN
39391 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
39392 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
39393 #endif
39394 #ifdef OPTION_MASK_RELOCATABLE
39395 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
39396 #endif
39397 #ifdef OPTION_MASK_STRICT_ALIGN
39398 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
39399 #endif
39400 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
39401 { "string", OPTION_MASK_STRING, false, false },
39404 /* Builtin mask mapping for printing the flags. */
39405 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
39407 { "altivec", RS6000_BTM_ALTIVEC, false, false },
39408 { "vsx", RS6000_BTM_VSX, false, false },
39409 { "spe", RS6000_BTM_SPE, false, false },
39410 { "paired", RS6000_BTM_PAIRED, false, false },
39411 { "fre", RS6000_BTM_FRE, false, false },
39412 { "fres", RS6000_BTM_FRES, false, false },
39413 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
39414 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
39415 { "popcntd", RS6000_BTM_POPCNTD, false, false },
39416 { "cell", RS6000_BTM_CELL, false, false },
39417 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
39418 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
39419 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
39420 { "crypto", RS6000_BTM_CRYPTO, false, false },
39421 { "htm", RS6000_BTM_HTM, false, false },
39422 { "hard-dfp", RS6000_BTM_DFP, false, false },
39423 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
39424 { "long-double-128", RS6000_BTM_LDBL128, false, false },
39425 { "float128", RS6000_BTM_FLOAT128, false, false },
39428 /* Option variables that we want to support inside attribute((target)) and
39429 #pragma GCC target operations. */
39431 struct rs6000_opt_var {
39432 const char *name; /* option name */
39433 size_t global_offset; /* offset of the option in global_options. */
39434 size_t target_offset; /* offset of the option in target options. */
39437 static struct rs6000_opt_var const rs6000_opt_vars[] =
39439 { "friz",
39440 offsetof (struct gcc_options, x_TARGET_FRIZ),
39441 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
39442 { "avoid-indexed-addresses",
39443 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
39444 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
39445 { "paired",
39446 offsetof (struct gcc_options, x_rs6000_paired_float),
39447 offsetof (struct cl_target_option, x_rs6000_paired_float), },
39448 { "longcall",
39449 offsetof (struct gcc_options, x_rs6000_default_long_calls),
39450 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
39451 { "optimize-swaps",
39452 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
39453 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
39454 { "allow-movmisalign",
39455 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
39456 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
39457 { "allow-df-permute",
39458 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
39459 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
39460 { "sched-groups",
39461 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
39462 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
39463 { "always-hint",
39464 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
39465 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
39466 { "align-branch-targets",
39467 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
39468 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
39469 { "vectorize-builtins",
39470 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
39471 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
39472 { "tls-markers",
39473 offsetof (struct gcc_options, x_tls_markers),
39474 offsetof (struct cl_target_option, x_tls_markers), },
39475 { "sched-prolog",
39476 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
39477 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
39478 { "sched-epilog",
39479 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
39480 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
39483 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
39484 parsing. Return true if there were no errors. */
39486 static bool
39487 rs6000_inner_target_options (tree args, bool attr_p)
39489 bool ret = true;
39491 if (args == NULL_TREE)
39494 else if (TREE_CODE (args) == STRING_CST)
39496 char *p = ASTRDUP (TREE_STRING_POINTER (args));
39497 char *q;
39499 while ((q = strtok (p, ",")) != NULL)
39501 bool error_p = false;
39502 bool not_valid_p = false;
39503 const char *cpu_opt = NULL;
39505 p = NULL;
39506 if (strncmp (q, "cpu=", 4) == 0)
39508 int cpu_index = rs6000_cpu_name_lookup (q+4);
39509 if (cpu_index >= 0)
39510 rs6000_cpu_index = cpu_index;
39511 else
39513 error_p = true;
39514 cpu_opt = q+4;
39517 else if (strncmp (q, "tune=", 5) == 0)
39519 int tune_index = rs6000_cpu_name_lookup (q+5);
39520 if (tune_index >= 0)
39521 rs6000_tune_index = tune_index;
39522 else
39524 error_p = true;
39525 cpu_opt = q+5;
39528 else
39530 size_t i;
39531 bool invert = false;
39532 char *r = q;
39534 error_p = true;
39535 if (strncmp (r, "no-", 3) == 0)
39537 invert = true;
39538 r += 3;
39541 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
39542 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
39544 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
39546 if (!rs6000_opt_masks[i].valid_target)
39547 not_valid_p = true;
39548 else
39550 error_p = false;
39551 rs6000_isa_flags_explicit |= mask;
39553 /* VSX needs altivec, so -mvsx automagically sets
39554 altivec and disables -mavoid-indexed-addresses. */
39555 if (!invert)
39557 if (mask == OPTION_MASK_VSX)
39559 mask |= OPTION_MASK_ALTIVEC;
39560 TARGET_AVOID_XFORM = 0;
39564 if (rs6000_opt_masks[i].invert)
39565 invert = !invert;
39567 if (invert)
39568 rs6000_isa_flags &= ~mask;
39569 else
39570 rs6000_isa_flags |= mask;
39572 break;
39575 if (error_p && !not_valid_p)
39577 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
39578 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
39580 size_t j = rs6000_opt_vars[i].global_offset;
39581 *((int *) ((char *)&global_options + j)) = !invert;
39582 error_p = false;
39583 not_valid_p = false;
39584 break;
39589 if (error_p)
39591 const char *eprefix, *esuffix;
39593 ret = false;
39594 if (attr_p)
39596 eprefix = "__attribute__((__target__(";
39597 esuffix = ")))";
39599 else
39601 eprefix = "#pragma GCC target ";
39602 esuffix = "";
39605 if (cpu_opt)
39606 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
39607 q, esuffix);
39608 else if (not_valid_p)
39609 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
39610 else
39611 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
39616 else if (TREE_CODE (args) == TREE_LIST)
39620 tree value = TREE_VALUE (args);
39621 if (value)
39623 bool ret2 = rs6000_inner_target_options (value, attr_p);
39624 if (!ret2)
39625 ret = false;
39627 args = TREE_CHAIN (args);
39629 while (args != NULL_TREE);
39632 else
39634 error ("attribute %<target%> argument not a string");
39635 return false;
39638 return ret;
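/* Usage sketch (illustrative): #pragma GCC target ("cpu=power8,no-vsx")
   is split on commas; "cpu=power8" sets rs6000_cpu_index, while
   "no-vsx" strips the "no-" prefix, matches "vsx" in rs6000_opt_masks,
   and clears OPTION_MASK_VSX from rs6000_isa_flags.  */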
39641 /* Print out the target options as a list for -mdebug=target. */
39643 static void
39644 rs6000_debug_target_options (tree args, const char *prefix)
39646 if (args == NULL_TREE)
39647 fprintf (stderr, "%s<NULL>", prefix);
39649 else if (TREE_CODE (args) == STRING_CST)
39651 char *p = ASTRDUP (TREE_STRING_POINTER (args));
39652 char *q;
39654 while ((q = strtok (p, ",")) != NULL)
39656 p = NULL;
39657 fprintf (stderr, "%s\"%s\"", prefix, q);
39658 prefix = ", ";
39662 else if (TREE_CODE (args) == TREE_LIST)
39666 tree value = TREE_VALUE (args);
39667 if (value)
39669 rs6000_debug_target_options (value, prefix);
39670 prefix = ", ";
39672 args = TREE_CHAIN (args);
39674 while (args != NULL_TREE);
39677 else
39678 gcc_unreachable ();
39680 return;
39684 /* Hook to validate attribute((target("..."))). */
39686 static bool
39687 rs6000_valid_attribute_p (tree fndecl,
39688 tree ARG_UNUSED (name),
39689 tree args,
39690 int flags)
39692 struct cl_target_option cur_target;
39693 bool ret;
39694 tree old_optimize = build_optimization_node (&global_options);
39695 tree new_target, new_optimize;
39696 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
39698 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
39700 if (TARGET_DEBUG_TARGET)
39702 tree tname = DECL_NAME (fndecl);
39703 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
39704 if (tname)
39705 fprintf (stderr, "function: %.*s\n",
39706 (int) IDENTIFIER_LENGTH (tname),
39707 IDENTIFIER_POINTER (tname));
39708 else
39709 fprintf (stderr, "function: unknown\n");
39711 fprintf (stderr, "args:");
39712 rs6000_debug_target_options (args, " ");
39713 fprintf (stderr, "\n");
39715 if (flags)
39716 fprintf (stderr, "flags: 0x%x\n", flags);
39718 fprintf (stderr, "--------------------\n");
39721 old_optimize = build_optimization_node (&global_options);
39722 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
39724 /* If the function changed the optimization levels as well as setting target
39725 options, start with the optimizations specified. */
39726 if (func_optimize && func_optimize != old_optimize)
39727 cl_optimization_restore (&global_options,
39728 TREE_OPTIMIZATION (func_optimize));
39730 /* The target attributes may also change some optimization flags, so update
39731 the optimization options if necessary. */
39732 cl_target_option_save (&cur_target, &global_options);
39733 rs6000_cpu_index = rs6000_tune_index = -1;
39734 ret = rs6000_inner_target_options (args, true);
39736 /* Set up any additional state. */
39737 if (ret)
39739 ret = rs6000_option_override_internal (false);
39740 new_target = build_target_option_node (&global_options);
39742 else
39743 new_target = NULL;
39745 new_optimize = build_optimization_node (&global_options);
39747 if (!new_target)
39748 ret = false;
39750 else if (fndecl)
39752 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
39754 if (old_optimize != new_optimize)
39755 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
39758 cl_target_option_restore (&global_options, &cur_target);
39760 if (old_optimize != new_optimize)
39761 cl_optimization_restore (&global_options,
39762 TREE_OPTIMIZATION (old_optimize));
39764 return ret;
39768 /* Hook to validate the current #pragma GCC target and set the state, and
39769 update the macros based on what was changed. If ARGS is NULL, then
39770 POP_TARGET is used to reset the options. */
39772 bool
39773 rs6000_pragma_target_parse (tree args, tree pop_target)
39775 tree prev_tree = build_target_option_node (&global_options);
39776 tree cur_tree;
39777 struct cl_target_option *prev_opt, *cur_opt;
39778 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
39779 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
39781 if (TARGET_DEBUG_TARGET)
39783 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
39784 fprintf (stderr, "args:");
39785 rs6000_debug_target_options (args, " ");
39786 fprintf (stderr, "\n");
39788 if (pop_target)
39790 fprintf (stderr, "pop_target:\n");
39791 debug_tree (pop_target);
39793 else
39794 fprintf (stderr, "pop_target: <NULL>\n");
39796 fprintf (stderr, "--------------------\n");
39799 if (! args)
39801 cur_tree = ((pop_target)
39802 ? pop_target
39803 : target_option_default_node);
39804 cl_target_option_restore (&global_options,
39805 TREE_TARGET_OPTION (cur_tree));
39807 else
39809 rs6000_cpu_index = rs6000_tune_index = -1;
39810 if (!rs6000_inner_target_options (args, false)
39811 || !rs6000_option_override_internal (false)
39812 || (cur_tree = build_target_option_node (&global_options))
39813 == NULL_TREE)
39815 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
39816 fprintf (stderr, "invalid pragma\n");
39818 return false;
39822 target_option_current_node = cur_tree;
39824 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
39825 change the macros that are defined. */
39826 if (rs6000_target_modify_macros_ptr)
39828 prev_opt = TREE_TARGET_OPTION (prev_tree);
39829 prev_bumask = prev_opt->x_rs6000_builtin_mask;
39830 prev_flags = prev_opt->x_rs6000_isa_flags;
39832 cur_opt = TREE_TARGET_OPTION (cur_tree);
39833 cur_flags = cur_opt->x_rs6000_isa_flags;
39834 cur_bumask = cur_opt->x_rs6000_builtin_mask;
39836 diff_bumask = (prev_bumask ^ cur_bumask);
39837 diff_flags = (prev_flags ^ cur_flags);
39839 if ((diff_flags != 0) || (diff_bumask != 0))
39841 /* Delete old macros. */
39842 rs6000_target_modify_macros_ptr (false,
39843 prev_flags & diff_flags,
39844 prev_bumask & diff_bumask);
39846 /* Define new macros. */
39847 rs6000_target_modify_macros_ptr (true,
39848 cur_flags & diff_flags,
39849 cur_bumask & diff_bumask);
39853 return true;
39857 /* Remember the last target of rs6000_set_current_function. */
39858 static GTY(()) tree rs6000_previous_fndecl;
39860 /* Establish appropriate back-end context for processing the function
39861 FNDECL. The argument might be NULL to indicate processing at top
39862 level, outside of any function scope. */
39863 static void
39864 rs6000_set_current_function (tree fndecl)
39866 tree old_tree = (rs6000_previous_fndecl
39867 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
39868 : NULL_TREE);
39870 tree new_tree = (fndecl
39871 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
39872 : NULL_TREE);
39874 if (TARGET_DEBUG_TARGET)
39876 bool print_final = false;
39877 fprintf (stderr, "\n==================== rs6000_set_current_function");
39879 if (fndecl)
39880 fprintf (stderr, ", fndecl %s (%p)",
39881 (DECL_NAME (fndecl)
39882 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
39883 : "<unknown>"), (void *)fndecl);
39885 if (rs6000_previous_fndecl)
39886 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
39888 fprintf (stderr, "\n");
39889 if (new_tree)
39891 fprintf (stderr, "\nnew fndecl target specific options:\n");
39892 debug_tree (new_tree);
39893 print_final = true;
39896 if (old_tree)
39898 fprintf (stderr, "\nold fndecl target specific options:\n");
39899 debug_tree (old_tree);
39900 print_final = true;
39903 if (print_final)
39904 fprintf (stderr, "--------------------\n");
39907 /* Only change the context if the function changes. This hook is called
39908 several times in the course of compiling a function, and we don't want to
39909 slow things down too much or call target_reinit when it isn't safe. */
39910 if (fndecl && fndecl != rs6000_previous_fndecl)
39912 rs6000_previous_fndecl = fndecl;
39913 if (old_tree == new_tree)
39916 else if (new_tree && new_tree != target_option_default_node)
39918 cl_target_option_restore (&global_options,
39919 TREE_TARGET_OPTION (new_tree));
39920 if (TREE_TARGET_GLOBALS (new_tree))
39921 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39922 else
39923 TREE_TARGET_GLOBALS (new_tree)
39924 = save_target_globals_default_opts ();
39927 else if (old_tree && old_tree != target_option_default_node)
39929 new_tree = target_option_current_node;
39930 cl_target_option_restore (&global_options,
39931 TREE_TARGET_OPTION (new_tree));
39932 if (TREE_TARGET_GLOBALS (new_tree))
39933 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39934 else if (new_tree == target_option_default_node)
39935 restore_target_globals (&default_target_globals);
39936 else
39937 TREE_TARGET_GLOBALS (new_tree)
39938 = save_target_globals_default_opts ();
39944 /* Save the current options */
39946 static void
39947 rs6000_function_specific_save (struct cl_target_option *ptr,
39948 struct gcc_options *opts)
39950 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
39951 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
39954 /* Restore the current options */
39956 static void
39957 rs6000_function_specific_restore (struct gcc_options *opts,
39958 struct cl_target_option *ptr)
39961 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
39962 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
39963 (void) rs6000_option_override_internal (false);
39966 /* Print the current options */
39968 static void
39969 rs6000_function_specific_print (FILE *file, int indent,
39970 struct cl_target_option *ptr)
39972 rs6000_print_isa_options (file, indent, "Isa options set",
39973 ptr->x_rs6000_isa_flags);
39975 rs6000_print_isa_options (file, indent, "Isa options explicit",
39976 ptr->x_rs6000_isa_flags_explicit);
39979 /* Helper function to print the current isa or misc options on a line. */
39981 static void
39982 rs6000_print_options_internal (FILE *file,
39983 int indent,
39984 const char *string,
39985 HOST_WIDE_INT flags,
39986 const char *prefix,
39987 const struct rs6000_opt_mask *opts,
39988 size_t num_elements)
39990 size_t i;
39991 size_t start_column = 0;
39992 size_t cur_column;
39993 size_t max_column = 120;
39994 size_t prefix_len = strlen (prefix);
39995 size_t comma_len = 0;
39996 const char *comma = "";
39998 if (indent)
39999 start_column += fprintf (file, "%*s", indent, "");
40001 if (!flags)
40003 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
40004 return;
40007 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
40009 /* Print the various mask options. */
40010 cur_column = start_column;
40011 for (i = 0; i < num_elements; i++)
40013 bool invert = opts[i].invert;
40014 const char *name = opts[i].name;
40015 const char *no_str = "";
40016 HOST_WIDE_INT mask = opts[i].mask;
40017 size_t len = comma_len + prefix_len + strlen (name);
40019 if (!invert)
40021 if ((flags & mask) == 0)
40023 no_str = "no-";
40024 len += sizeof ("no-") - 1;
40027 flags &= ~mask;
40030 else
40032 if ((flags & mask) != 0)
40034 no_str = "no-";
40035 len += sizeof ("no-") - 1;
40038 flags |= mask;
40041 cur_column += len;
40042 if (cur_column > max_column)
40044 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
40045 cur_column = start_column + len;
40046 comma = "";
40049 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
40050 comma = ", ";
40051 comma_len = sizeof (", ") - 1;
40054 fputs ("\n", file);
40057 /* Helper function to print the current isa options on a line. */
40059 static void
40060 rs6000_print_isa_options (FILE *file, int indent, const char *string,
40061 HOST_WIDE_INT flags)
40063 rs6000_print_options_internal (file, indent, string, flags, "-m",
40064 &rs6000_opt_masks[0],
40065 ARRAY_SIZE (rs6000_opt_masks));
40068 static void
40069 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
40070 HOST_WIDE_INT flags)
40072 rs6000_print_options_internal (file, indent, string, flags, "",
40073 &rs6000_builtin_mask_names[0],
40074 ARRAY_SIZE (rs6000_builtin_mask_names));
40077 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
40078 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
40079 -mvsx-timode, -mupper-regs-df).
40081 If the user used -mno-power8-vector, we need to turn off all of the implicit
40082 ISA 2.07 and 3.0 options that relate to the vector unit.
40084 If the user used -mno-power9-vector, we need to turn off all of the implicit
40085 ISA 3.0 options that relate to the vector unit.
40087 This function does not handle explicit options such as the user specifying
40088 -mdirect-move. These are handled in rs6000_option_override_internal, and
40089 the appropriate error is given if needed.
40091 We return a mask of all of the implicit options that should not be enabled
40092 by default. */
40094 static HOST_WIDE_INT
40095 rs6000_disable_incompatible_switches (void)
40097 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
40098 size_t i, j;
40100 static const struct {
40101 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
40102 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
40103 const char *const name; /* name of the switch. */
40104 } flags[] = {
40105 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
40106 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
40107 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
40110 for (i = 0; i < ARRAY_SIZE (flags); i++)
40112 HOST_WIDE_INT no_flag = flags[i].no_flag;
40114 if ((rs6000_isa_flags & no_flag) == 0
40115 && (rs6000_isa_flags_explicit & no_flag) != 0)
40117 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
40118 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
40119 & rs6000_isa_flags
40120 & dep_flags);
40122 if (set_flags)
40124 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
40125 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
40127 set_flags &= ~rs6000_opt_masks[j].mask;
40128 error ("-mno-%s turns off -m%s",
40129 flags[i].name,
40130 rs6000_opt_masks[j].name);
40133 gcc_assert (!set_flags);
40136 rs6000_isa_flags &= ~dep_flags;
40137 ignore_masks |= no_flag | dep_flags;
40141 if (!TARGET_P9_VECTOR
40142 && (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) != 0
40143 && TARGET_P9_DFORM_BOTH > 0)
40145 error ("-mno-power9-vector turns off -mpower9-dform");
40146 TARGET_P9_DFORM_BOTH = 0;
40149 return ignore_masks;
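/* Example (illustrative): -mno-vsx clears every flag in
   OTHER_VSX_VECTOR_MASKS and adds them to the ignore mask; if the user
   had also said -mpower8-vector explicitly, the loop above reports
   "-mno-vsx turns off -mpower8-vector" before clearing it.  */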
40153 /* Hook to determine if one function can safely inline another. */
40155 static bool
40156 rs6000_can_inline_p (tree caller, tree callee)
40158 bool ret = false;
40159 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
40160 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
40162 /* If callee has no option attributes, then it is ok to inline. */
40163 if (!callee_tree)
40164 ret = true;
40166 /* If caller has no option attributes, but callee does then it is not ok to
40167 inline. */
40168 else if (!caller_tree)
40169 ret = false;
40171 else
40173 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
40174 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
40176 /* The callee's options should be a subset of the caller's, i.e. a vsx function
40177 can inline an altivec function but a non-vsx function can't inline a
40178 vsx function. */
40179 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
40180 == callee_opts->x_rs6000_isa_flags)
40181 ret = true;
40184 if (TARGET_DEBUG_TARGET)
40185 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
40186 (DECL_NAME (caller)
40187 ? IDENTIFIER_POINTER (DECL_NAME (caller))
40188 : "<unknown>"),
40189 (DECL_NAME (callee)
40190 ? IDENTIFIER_POINTER (DECL_NAME (callee))
40191 : "<unknown>"),
40192 (ret ? "can" : "cannot"));
40194 return ret;
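/* Example (illustrative): a caller compiled with -mvsx (whose ISA mask
   therefore includes both OPTION_MASK_VSX and OPTION_MASK_ALTIVEC) may
   inline a callee marked target("altivec"), since ANDing the two masks
   reproduces the callee's flags; the reverse direction is rejected.  */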
40197 /* Allocate a stack temp and fix up the address so it meets the particular
40198 memory requirements (either offsettable or REG+REG addressing). */
40200 rtx
40201 rs6000_allocate_stack_temp (machine_mode mode,
40202 bool offsettable_p,
40203 bool reg_reg_p)
40205 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
40206 rtx addr = XEXP (stack, 0);
40207 int strict_p = (reload_in_progress || reload_completed);
40209 if (!legitimate_indirect_address_p (addr, strict_p))
40211 if (offsettable_p
40212 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
40213 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
40215 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
40216 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
40219 return stack;
40222 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
40223 to such a form to deal with memory reference instructions like STFIWX that
40224 only take reg+reg addressing. */
40226 rtx
40227 rs6000_address_for_fpconvert (rtx x)
40229 int strict_p = (reload_in_progress || reload_completed);
40230 rtx addr;
40232 gcc_assert (MEM_P (x));
40233 addr = XEXP (x, 0);
40234 if (! legitimate_indirect_address_p (addr, strict_p)
40235 && ! legitimate_indexed_address_p (addr, strict_p))
40237 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
40239 rtx reg = XEXP (addr, 0);
40240 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
40241 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
40242 gcc_assert (REG_P (reg));
40243 emit_insn (gen_add3_insn (reg, reg, size_rtx));
40244 addr = reg;
40246 else if (GET_CODE (addr) == PRE_MODIFY)
40248 rtx reg = XEXP (addr, 0);
40249 rtx expr = XEXP (addr, 1);
40250 gcc_assert (REG_P (reg));
40251 gcc_assert (GET_CODE (expr) == PLUS);
40252 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
40253 addr = reg;
40256 x = replace_equiv_address (x, copy_addr_to_reg (addr));
40259 return x;
40262 /* Given a memory reference, if it is not in the form for altivec memory
40263 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
40264 convert to the altivec format. */
40266 rtx
40267 rs6000_address_for_altivec (rtx x)
40269 gcc_assert (MEM_P (x));
40270 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
40272 rtx addr = XEXP (x, 0);
40273 int strict_p = (reload_in_progress || reload_completed);
40275 if (!legitimate_indexed_address_p (addr, strict_p)
40276 && !legitimate_indirect_address_p (addr, strict_p))
40277 addr = copy_to_mode_reg (Pmode, addr);
40279 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
40280 x = change_address (x, GET_MODE (x), addr);
40283 return x;
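/* Example (illustrative): a reg+offset address is first copied into a
   register, then wrapped as (and addr -16), giving the 16-byte-aligned
   form that the AltiVec lvx/stvx instructions expect.  */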
40286 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
40288 On the RS/6000, all integer constants are acceptable, most won't be valid
40289 for particular insns, though. Only easy FP constants are acceptable. */
40291 static bool
40292 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
40294 if (TARGET_ELF && tls_referenced_p (x))
40295 return false;
40297 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
40298 || GET_MODE (x) == VOIDmode
40299 || (TARGET_POWERPC64 && mode == DImode)
40300 || easy_fp_constant (x, mode)
40301 || easy_vector_constant (x, mode));
40305 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
40307 static bool
40308 chain_already_loaded (rtx_insn *last)
40310 for (; last != NULL; last = PREV_INSN (last))
40312 if (NONJUMP_INSN_P (last))
40314 rtx patt = PATTERN (last);
40316 if (GET_CODE (patt) == SET)
40318 rtx lhs = XEXP (patt, 0);
40320 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
40321 return true;
40325 return false;
40328 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
40330 void
40331 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
40333 const bool direct_call_p
40334 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
40335 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
40336 rtx toc_load = NULL_RTX;
40337 rtx toc_restore = NULL_RTX;
40338 rtx func_addr;
40339 rtx abi_reg = NULL_RTX;
40340 rtx call[4];
40341 int n_call;
40342 rtx insn;
40344 /* Handle longcall attributes. */
40345 if (INTVAL (cookie) & CALL_LONG)
40346 func_desc = rs6000_longcall_ref (func_desc);
40348 /* Handle indirect calls. */
40349 if (GET_CODE (func_desc) != SYMBOL_REF
40350 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
40352 /* Save the TOC into its reserved slot before the call,
40353 and prepare to restore it after the call. */
40354 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
40355 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
40356 rtx stack_toc_mem = gen_frame_mem (Pmode,
40357 gen_rtx_PLUS (Pmode, stack_ptr,
40358 stack_toc_offset));
40359 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
40360 gen_rtvec (1, stack_toc_offset),
40361 UNSPEC_TOCSLOT);
40362 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
40364 /* Can we optimize saving the TOC in the prologue or
40365 do we need to do it at every call? */
40366 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
40367 cfun->machine->save_toc_in_prologue = true;
40368 else
40370 MEM_VOLATILE_P (stack_toc_mem) = 1;
40371 emit_move_insn (stack_toc_mem, toc_reg);
40374 if (DEFAULT_ABI == ABI_ELFv2)
40376 /* A function pointer in the ELFv2 ABI is just a plain address, but
40377 the ABI requires it to be loaded into r12 before the call. */
40378 func_addr = gen_rtx_REG (Pmode, 12);
40379 emit_move_insn (func_addr, func_desc);
40380 abi_reg = func_addr;
40382 else
40384 /* A function pointer under AIX is a pointer to a data area whose
40385 first word contains the actual address of the function, whose
40386 second word contains a pointer to its TOC, and whose third word
40387 contains a value to place in the static chain register (r11).
40388 Note that if we load the static chain, our "trampoline" need
40389 not have any executable code. */
40391 /* Load up address of the actual function. */
40392 func_desc = force_reg (Pmode, func_desc);
40393 func_addr = gen_reg_rtx (Pmode);
40394 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
40396 /* Prepare to load the TOC of the called function. Note that the
40397 TOC load must happen immediately before the actual call so
40398 that unwinding the TOC registers works correctly. See the
40399 comment in frob_update_context. */
40400 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
40401 rtx func_toc_mem = gen_rtx_MEM (Pmode,
40402 gen_rtx_PLUS (Pmode, func_desc,
40403 func_toc_offset));
40404 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
40406 /* If we have a static chain, load it up. But, if the call was
40407 originally direct, the 3rd word has not been written since no
40408 trampoline has been built, so we ought not to load it, lest we
40409 override a static chain value. */
40410 if (!direct_call_p
40411 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
40412 && !chain_already_loaded (get_current_sequence ()->next->last))
40414 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
40415 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
40416 rtx func_sc_mem = gen_rtx_MEM (Pmode,
40417 gen_rtx_PLUS (Pmode, func_desc,
40418 func_sc_offset));
40419 emit_move_insn (sc_reg, func_sc_mem);
40420 abi_reg = sc_reg;
40424 else
40426 /* Direct calls use the TOC: for local calls, the callee will
40427 assume the TOC register is set; for non-local calls, the
40428 PLT stub needs the TOC register. */
40429 abi_reg = toc_reg;
40430 func_addr = func_desc;
40433 /* Create the call. */
40434 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
40435 if (value != NULL_RTX)
40436 call[0] = gen_rtx_SET (value, call[0]);
40437 n_call = 1;
40439 if (toc_load)
40440 call[n_call++] = toc_load;
40441 if (toc_restore)
40442 call[n_call++] = toc_restore;
40444 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
40446 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
40447 insn = emit_call_insn (insn);
40449 /* Mention all registers defined by the ABI to hold information
40450 as uses in CALL_INSN_FUNCTION_USAGE. */
40451 if (abi_reg)
40452 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
40455 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
40457 void
40458 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
40460 rtx call[2];
40461 rtx insn;
40463 gcc_assert (INTVAL (cookie) == 0);
40465 /* Create the call. */
40466 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
40467 if (value != NULL_RTX)
40468 call[0] = gen_rtx_SET (value, call[0]);
40470 call[1] = simple_return_rtx;
40472 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
40473 insn = emit_call_insn (insn);
40475 /* Note use of the TOC register. */
40476 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
40479 /* Return whether we need to always update the saved TOC pointer when we update
40480 the stack pointer. */
40482 static bool
40483 rs6000_save_toc_in_prologue_p (void)
40485 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
40488 #ifdef HAVE_GAS_HIDDEN
40489 # define USE_HIDDEN_LINKONCE 1
40490 #else
40491 # define USE_HIDDEN_LINKONCE 0
40492 #endif
40494 /* Fills in the label name that should be used for a 476 link stack thunk. */
40496 void
40497 get_ppc476_thunk_name (char name[32])
40499 gcc_assert (TARGET_LINK_STACK);
40501 if (USE_HIDDEN_LINKONCE)
40502 sprintf (name, "__ppc476.get_thunk");
40503 else
40504 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
40507 /* This function emits the simple thunk routine that is used to preserve
40508 the link stack on the 476 cpu. */
40510 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
40511 static void
40512 rs6000_code_end (void)
40514 char name[32];
40515 tree decl;
40517 if (!TARGET_LINK_STACK)
40518 return;
40520 get_ppc476_thunk_name (name);
40522 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
40523 build_function_type_list (void_type_node, NULL_TREE));
40524 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
40525 NULL_TREE, void_type_node);
40526 TREE_PUBLIC (decl) = 1;
40527 TREE_STATIC (decl) = 1;
40529 #if RS6000_WEAK
40530 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
40532 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
40533 targetm.asm_out.unique_section (decl, 0);
40534 switch_to_section (get_named_section (decl, NULL, 0));
40535 DECL_WEAK (decl) = 1;
40536 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
40537 targetm.asm_out.globalize_label (asm_out_file, name);
40538 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
40539 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
40541 else
40542 #endif
40544 switch_to_section (text_section);
40545 ASM_OUTPUT_LABEL (asm_out_file, name);
40548 DECL_INITIAL (decl) = make_node (BLOCK);
40549 current_function_decl = decl;
40550 allocate_struct_function (decl, false);
40551 init_function_start (decl);
40552 first_function_block_is_cold = false;
40553 /* Make sure unwind info is emitted for the thunk if needed. */
40554 final_start_function (emit_barrier (), asm_out_file, 1);
40556 fputs ("\tblr\n", asm_out_file);
40558 final_end_function ();
40559 init_insn_lengths ();
40560 free_after_compilation (cfun);
40561 set_cfun (NULL);
40562 current_function_decl = NULL;
40565 /* Add r30 to hard reg set if the prologue sets it up and it is not
40566 pic_offset_table_rtx. */
40568 static void
40569 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
40571 if (!TARGET_SINGLE_PIC_BASE
40572 && TARGET_TOC
40573 && TARGET_MINIMAL_TOC
40574 && !constant_pool_empty_p ())
40575 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
40576 if (cfun->machine->split_stack_argp_used)
40577 add_to_hard_reg_set (&set->set, Pmode, 12);
40581 /* Helper function for rs6000_split_logical to emit a logical instruction after
40582 splitting a multi-register operation into operations on single GPRs.
40584 DEST is the destination register.
40585 OP1 and OP2 are the input source registers.
40586 CODE is the base operation (AND, IOR, XOR, NOT).
40587 MODE is the machine mode.
40588 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40589 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40590 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
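/* For example, CODE == AND with COMPLEMENT_OP2_P set ultimately emits
	(set dest (and (not op2) op1))
   (the inverted operand is placed first, as canonical RTL requires),
   which matches the "andc" instruction.  */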
40592 static void
40593 rs6000_split_logical_inner (rtx dest,
40594 rtx op1,
40595 rtx op2,
40596 enum rtx_code code,
40597 machine_mode mode,
40598 bool complement_final_p,
40599 bool complement_op1_p,
40600 bool complement_op2_p)
40602 rtx bool_rtx;
40604 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
40605 if (op2 && GET_CODE (op2) == CONST_INT
40606 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
40607 && !complement_final_p && !complement_op1_p && !complement_op2_p)
40609 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
40610 HOST_WIDE_INT value = INTVAL (op2) & mask;
40612 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
40613 if (code == AND)
40615 if (value == 0)
40617 emit_insn (gen_rtx_SET (dest, const0_rtx));
40618 return;
40621 else if (value == mask)
40623 if (!rtx_equal_p (dest, op1))
40624 emit_insn (gen_rtx_SET (dest, op1));
40625 return;
40629 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
40630 into separate ORI/ORIS or XORI/XORIS instructions. */
40631 else if (code == IOR || code == XOR)
40633 if (value == 0)
40635 if (!rtx_equal_p (dest, op1))
40636 emit_insn (gen_rtx_SET (dest, op1));
40637 return;
40642 if (code == AND && mode == SImode
40643 && !complement_final_p && !complement_op1_p && !complement_op2_p)
40645 emit_insn (gen_andsi3 (dest, op1, op2));
40646 return;
40649 if (complement_op1_p)
40650 op1 = gen_rtx_NOT (mode, op1);
40652 if (complement_op2_p)
40653 op2 = gen_rtx_NOT (mode, op2);
40655 /* For canonical RTL, if only one arm is inverted it is the first. */
40656 if (!complement_op1_p && complement_op2_p)
40657 std::swap (op1, op2);
40659 bool_rtx = ((code == NOT)
40660 ? gen_rtx_NOT (mode, op1)
40661 : gen_rtx_fmt_ee (code, mode, op1, op2));
40663 if (complement_final_p)
40664 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
40666 emit_insn (gen_rtx_SET (dest, bool_rtx));
40669 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
40670 operations are split immediately during RTL generation to allow for more
40671 optimizations of the AND/IOR/XOR.
40673 OPERANDS is an array containing the destination and two input operands.
40674 CODE is the base operation (AND, IOR, XOR, NOT).
40675 MODE is the machine mode.
40676 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40677 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40678 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
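/* Worked example (illustrative): on a 32-bit target,
	(ior:DI x (const_int 0x12345678))
   splits into an SImode IOR of 0 for the high word (a simple move)
   and an SImode IOR of 0x12345678 for the low word; the latter is
   not a logical_const_operand, so it is split again into an IOR of
   0x12340000 (oris) followed by an IOR of 0x5678 (ori).  */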
40682 static void
40683 rs6000_split_logical_di (rtx operands[3],
40684 enum rtx_code code,
40685 bool complement_final_p,
40686 bool complement_op1_p,
40687 bool complement_op2_p)
40689 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
40690 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
40691 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
40692 enum hi_lo { hi = 0, lo = 1 };
40693 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
40694 size_t i;
40696 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
40697 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
40698 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
40699 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
40701 if (code == NOT)
40702 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
40703 else
40705 if (GET_CODE (operands[2]) != CONST_INT)
40707 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
40708 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
40710 else
40712 HOST_WIDE_INT value = INTVAL (operands[2]);
40713 HOST_WIDE_INT value_hi_lo[2];
40715 gcc_assert (!complement_final_p);
40716 gcc_assert (!complement_op1_p);
40717 gcc_assert (!complement_op2_p);
40719 value_hi_lo[hi] = value >> 32;
40720 value_hi_lo[lo] = value & lower_32bits;
40722 for (i = 0; i < 2; i++)
40724 HOST_WIDE_INT sub_value = value_hi_lo[i];
40726 if (sub_value & sign_bit)
40727 sub_value |= upper_32bits;
40729 op2_hi_lo[i] = GEN_INT (sub_value);
40731 /* If this is an AND instruction, check to see if we need to load
40732 the value in a register. */
40733 if (code == AND && sub_value != -1 && sub_value != 0
40734 && !and_operand (op2_hi_lo[i], SImode))
40735 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
40740 for (i = 0; i < 2; i++)
40742 /* Split large IOR/XOR operations. */
40743 if ((code == IOR || code == XOR)
40744 && GET_CODE (op2_hi_lo[i]) == CONST_INT
40745 && !complement_final_p
40746 && !complement_op1_p
40747 && !complement_op2_p
40748 && !logical_const_operand (op2_hi_lo[i], SImode))
40750 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
40751 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
40752 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
40753 rtx tmp = gen_reg_rtx (SImode);
40755 /* Make sure the constant is sign extended. */
40756 if ((hi_16bits & sign_bit) != 0)
40757 hi_16bits |= upper_32bits;
40759 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
40760 code, SImode, false, false, false);
40762 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
40763 code, SImode, false, false, false);
40765 else
40766 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
40767 code, SImode, complement_final_p,
40768 complement_op1_p, complement_op2_p);
40771 return;
40774 /* Split the insns that make up boolean operations operating on multiple GPR
40775 registers. The boolean MD patterns ensure that the inputs either are
40776 exactly the same as the output registers, or there is no overlap.
40778 OPERANDS is an array containing the destination and two input operands.
40779 CODE is the base operation (AND, IOR, XOR, NOT).
40780 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40781 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40782 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
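/* For example (illustrative register choices): after reload, a
   TImode AND on a 64-bit target spans two GPRs per operand and is
   split into two independent DImode ANDs, e.g.
	and r10,r6,r8
	and r11,r7,r9  */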
40784 void
40785 rs6000_split_logical (rtx operands[3],
40786 enum rtx_code code,
40787 bool complement_final_p,
40788 bool complement_op1_p,
40789 bool complement_op2_p)
40791 machine_mode mode = GET_MODE (operands[0]);
40792 machine_mode sub_mode;
40793 rtx op0, op1, op2;
40794 int sub_size, regno0, regno1, nregs, i;
40796 /* If this is DImode, use the specialized version that can run before
40797 register allocation. */
40798 if (mode == DImode && !TARGET_POWERPC64)
40800 rs6000_split_logical_di (operands, code, complement_final_p,
40801 complement_op1_p, complement_op2_p);
40802 return;
40805 op0 = operands[0];
40806 op1 = operands[1];
40807 op2 = (code == NOT) ? NULL_RTX : operands[2];
40808 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
40809 sub_size = GET_MODE_SIZE (sub_mode);
40810 regno0 = REGNO (op0);
40811 regno1 = REGNO (op1);
40813 gcc_assert (reload_completed);
40814 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
40815 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
40817 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
40818 gcc_assert (nregs > 1);
40820 if (op2 && REG_P (op2))
40821 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
40823 for (i = 0; i < nregs; i++)
40825 int offset = i * sub_size;
40826 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
40827 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
40828 rtx sub_op2 = ((code == NOT)
40829 ? NULL_RTX
40830 : simplify_subreg (sub_mode, op2, mode, offset));
40832 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
40833 complement_final_p, complement_op1_p,
40834 complement_op2_p);
40837 return;
40841 /* Return true if the peephole2 pass can combine an addis instruction
40842 and a D-form load with an offset into a pair that can be fused together
40843 on a power8. */
40845 bool
40846 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
40847 rtx addis_value, /* addis value. */
40848 rtx target, /* target register that is loaded. */
40849 rtx mem) /* bottom part of the memory addr. */
40851 rtx addr;
40852 rtx base_reg;
40854 /* Validate arguments. */
40855 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
40856 return false;
40858 if (!base_reg_operand (target, GET_MODE (target)))
40859 return false;
40861 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
40862 return false;
40864 /* Allow sign/zero extension. */
40865 if (GET_CODE (mem) == ZERO_EXTEND
40866 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
40867 mem = XEXP (mem, 0);
40869 if (!MEM_P (mem))
40870 return false;
40872 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
40873 return false;
40875 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
40876 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
40877 return false;
40879 /* Validate that the register used to load the high value is either the
40880 register being loaded, or we can safely replace its use.
40882 This function is only called from the peephole2 pass and we assume that
40883 there are 2 instructions in the peephole (addis and load), so we want to
40884 check that the target register is not used in the memory address and
40885 that the register holding the addis result is dead after the peephole. */
40886 if (REGNO (addis_reg) != REGNO (target))
40888 if (reg_mentioned_p (target, mem))
40889 return false;
40891 if (!peep2_reg_dead_p (2, addis_reg))
40892 return false;
40894 /* If the target register being loaded is the stack pointer, we must
40895 avoid loading any other value into it, even temporarily. */
40896 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
40897 return false;
40900 base_reg = XEXP (addr, 0);
40901 return REGNO (addis_reg) == REGNO (base_reg);
40904 /* During the peephole2 pass, adjust and expand the insns for a load fusion
40905 sequence. We adjust the addis register to use the target register. If the
40906 load sign extends, we emit a zero-extending load followed by an
40907 explicit sign extension, since the fusion only covers zero-extending
40908 loads.
40910 The operands are:
40911 operands[0] register set with addis (to be replaced with target)
40912 operands[1] value set via addis
40913 operands[2] target register being loaded
40914 operands[3] D-form memory reference using operands[0]. */
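/* For example (illustrative), when r10 is dead after the pair,
	addis r10,r2,sym@toc@ha
	lwz   r9,sym@toc@l(r10)
   is rewritten to use the target register throughout:
	addis r9,r2,sym@toc@ha
	lwz   r9,sym@toc@l(r9)
   so that the power8 can fuse the two instructions.  */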
40916 void
40917 expand_fusion_gpr_load (rtx *operands)
40919 rtx addis_value = operands[1];
40920 rtx target = operands[2];
40921 rtx orig_mem = operands[3];
40922 rtx new_addr, new_mem, orig_addr, offset;
40923 enum rtx_code plus_or_lo_sum;
40924 machine_mode target_mode = GET_MODE (target);
40925 machine_mode extend_mode = target_mode;
40926 machine_mode ptr_mode = Pmode;
40927 enum rtx_code extend = UNKNOWN;
40929 if (GET_CODE (orig_mem) == ZERO_EXTEND
40930 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
40932 extend = GET_CODE (orig_mem);
40933 orig_mem = XEXP (orig_mem, 0);
40934 target_mode = GET_MODE (orig_mem);
40937 gcc_assert (MEM_P (orig_mem));
40939 orig_addr = XEXP (orig_mem, 0);
40940 plus_or_lo_sum = GET_CODE (orig_addr);
40941 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
40943 offset = XEXP (orig_addr, 1);
40944 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
40945 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
40947 if (extend != UNKNOWN)
40948 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
40950 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
40951 UNSPEC_FUSION_GPR);
40952 emit_insn (gen_rtx_SET (target, new_mem));
40954 if (extend == SIGN_EXTEND)
40956 int sub_off = ((BYTES_BIG_ENDIAN)
40957 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
40958 : 0);
40959 rtx sign_reg
40960 = simplify_subreg (target_mode, target, extend_mode, sub_off);
40962 emit_insn (gen_rtx_SET (target,
40963 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
40966 return;
40969 /* Emit the addis instruction that will be part of a fused instruction
40970 sequence. */
40972 void
40973 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
40974 const char *mode_name)
40976 rtx fuse_ops[10];
40977 char insn_template[80];
40978 const char *addis_str = NULL;
40979 const char *comment_str = ASM_COMMENT_START;
40981 if (*comment_str == ' ')
40982 comment_str++;
40984 /* Emit the addis instruction. */
40985 fuse_ops[0] = target;
40986 if (satisfies_constraint_L (addis_value))
40988 fuse_ops[1] = addis_value;
40989 addis_str = "lis %0,%v1";
40992 else if (GET_CODE (addis_value) == PLUS)
40994 rtx op0 = XEXP (addis_value, 0);
40995 rtx op1 = XEXP (addis_value, 1);
40997 if (REG_P (op0) && CONST_INT_P (op1)
40998 && satisfies_constraint_L (op1))
41000 fuse_ops[1] = op0;
41001 fuse_ops[2] = op1;
41002 addis_str = "addis %0,%1,%v2";
41006 else if (GET_CODE (addis_value) == HIGH)
41008 rtx value = XEXP (addis_value, 0);
41009 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
41011 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
41012 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
41013 if (TARGET_ELF)
41014 addis_str = "addis %0,%2,%1@toc@ha";
41016 else if (TARGET_XCOFF)
41017 addis_str = "addis %0,%1@u(%2)";
41019 else
41020 gcc_unreachable ();
41023 else if (GET_CODE (value) == PLUS)
41025 rtx op0 = XEXP (value, 0);
41026 rtx op1 = XEXP (value, 1);
41028 if (GET_CODE (op0) == UNSPEC
41029 && XINT (op0, 1) == UNSPEC_TOCREL
41030 && CONST_INT_P (op1))
41032 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
41033 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
41034 fuse_ops[3] = op1;
41035 if (TARGET_ELF)
41036 addis_str = "addis %0,%2,%1+%3@toc@ha";
41038 else if (TARGET_XCOFF)
41039 addis_str = "addis %0,%1+%3@u(%2)";
41041 else
41042 gcc_unreachable ();
41046 else if (satisfies_constraint_L (value))
41048 fuse_ops[1] = value;
41049 addis_str = "lis %0,%v1";
41052 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
41054 fuse_ops[1] = value;
41055 addis_str = "lis %0,%1@ha";
41059 if (!addis_str)
41060 fatal_insn ("Could not generate addis value for fusion", addis_value);
41062 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
41063 comment, mode_name);
41064 output_asm_insn (insn_template, fuse_ops);
41067 /* Emit a D-form load or store instruction that is the second instruction
41068 of a fusion sequence. */
41070 void
41071 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
41072 const char *insn_str)
41074 rtx fuse_ops[10];
41075 char insn_template[80];
41077 fuse_ops[0] = load_store_reg;
41078 fuse_ops[1] = addis_reg;
41080 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
41082 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
41083 fuse_ops[2] = offset;
41084 output_asm_insn (insn_template, fuse_ops);
41087 else if (GET_CODE (offset) == UNSPEC
41088 && XINT (offset, 1) == UNSPEC_TOCREL)
41090 if (TARGET_ELF)
41091 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
41093 else if (TARGET_XCOFF)
41094 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
41096 else
41097 gcc_unreachable ();
41099 fuse_ops[2] = XVECEXP (offset, 0, 0);
41100 output_asm_insn (insn_template, fuse_ops);
41103 else if (GET_CODE (offset) == PLUS
41104 && GET_CODE (XEXP (offset, 0)) == UNSPEC
41105 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
41106 && CONST_INT_P (XEXP (offset, 1)))
41108 rtx tocrel_unspec = XEXP (offset, 0);
41109 if (TARGET_ELF)
41110 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
41112 else if (TARGET_XCOFF)
41113 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
41115 else
41116 gcc_unreachable ();
41118 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
41119 fuse_ops[3] = XEXP (offset, 1);
41120 output_asm_insn (insn_template, fuse_ops);
41123 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
41125 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
41127 fuse_ops[2] = offset;
41128 output_asm_insn (insn_template, fuse_ops);
41131 else
41132 fatal_insn ("Unable to generate load/store offset for fusion", offset);
41134 return;
41137 /* Wrap a TOC address that can be fused to indicate that special fusion
41138 processing is needed. */
41140 static rtx
41141 fusion_wrap_memory_address (rtx old_mem)
41143 rtx old_addr = XEXP (old_mem, 0);
41144 rtvec v = gen_rtvec (1, old_addr);
41145 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
41146 return replace_equiv_address_nv (old_mem, new_addr, false);
41149 /* Given an address, convert it into the addis and load offset parts. Addresses
41150 created during the peephole2 process look like:
41151 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
41152 (unspec [(...)] UNSPEC_TOCREL))
41154 Addresses created via toc fusion look like:
41155 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
41157 static void
41158 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
41160 rtx hi, lo;
41162 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
41164 lo = XVECEXP (addr, 0, 0);
41165 hi = gen_rtx_HIGH (Pmode, lo);
41167 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
41169 hi = XEXP (addr, 0);
41170 lo = XEXP (addr, 1);
41172 else
41173 gcc_unreachable ();
41175 *p_hi = hi;
41176 *p_lo = lo;
41179 /* Return a string to fuse an addis instruction with a GPR load into the
41180 same register that the addis set. The address used is the logical
41181 address that was formed during peephole2:
41182 (lo_sum (high) (low-part))
41184 Or the address is the TOC address that is wrapped before register allocation:
41185 (unspec [(addr)] UNSPEC_FUSION_ADDIS)
41187 The code is complicated, so we call output_asm_insn directly, and just
41188 return "". */
41190 const char *
41191 emit_fusion_gpr_load (rtx target, rtx mem)
41193 rtx addis_value;
41194 rtx addr;
41195 rtx load_offset;
41196 const char *load_str = NULL;
41197 const char *mode_name = NULL;
41198 machine_mode mode;
41200 if (GET_CODE (mem) == ZERO_EXTEND)
41201 mem = XEXP (mem, 0);
41203 gcc_assert (REG_P (target) && MEM_P (mem));
41205 addr = XEXP (mem, 0);
41206 fusion_split_address (addr, &addis_value, &load_offset);
41208 /* Now emit the load instruction to the same register. */
41209 mode = GET_MODE (mem);
41210 switch (mode)
41212 case QImode:
41213 mode_name = "char";
41214 load_str = "lbz";
41215 break;
41217 case HImode:
41218 mode_name = "short";
41219 load_str = "lhz";
41220 break;
41222 case SImode:
41223 case SFmode:
41224 mode_name = (mode == SFmode) ? "float" : "int";
41225 load_str = "lwz";
41226 break;
41228 case DImode:
41229 case DFmode:
41230 gcc_assert (TARGET_POWERPC64);
41231 mode_name = (mode == DFmode) ? "double" : "long";
41232 load_str = "ld";
41233 break;
41235 default:
41236 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
41239 /* Emit the addis instruction. */
41240 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
41242 /* Emit the D-form load instruction. */
41243 emit_fusion_load_store (target, target, load_offset, load_str);
41245 return "";
41249 /* Return true if the peephole2 pass can combine an addis instruction
41250 and a load or store into a pair that can be fused together. This form
41251 of fusion was added in ISA 3.0 (power9) hardware. */
41253 bool
41254 fusion_p9_p (rtx addis_reg, /* register set via addis. */
41255 rtx addis_value, /* addis value. */
41256 rtx dest, /* destination (memory or register). */
41257 rtx src) /* source (register or memory). */
41259 rtx addr, mem, offset;
41260 enum machine_mode mode = GET_MODE (src);
41262 /* Validate arguments. */
41263 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
41264 return false;
41266 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
41267 return false;
41269 /* Ignore extend operations that are part of the load. */
41270 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
41271 src = XEXP (src, 0);
41273 /* Test for memory<-register or register<-memory. */
41274 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
41276 if (!MEM_P (dest))
41277 return false;
41279 mem = dest;
41282 else if (MEM_P (src))
41284 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
41285 return false;
41287 mem = src;
41290 else
41291 return false;
41293 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
41294 if (GET_CODE (addr) == PLUS)
41296 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
41297 return false;
41299 return satisfies_constraint_I (XEXP (addr, 1));
41302 else if (GET_CODE (addr) == LO_SUM)
41304 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
41305 return false;
41307 offset = XEXP (addr, 1);
41308 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
41309 return small_toc_ref (offset, GET_MODE (offset));
41311 else if (TARGET_ELF && !TARGET_POWERPC64)
41312 return CONSTANT_P (offset);
41315 return false;
41318 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
41319 load sequence.
41321 The operands are:
41322 operands[0] register set with addis
41323 operands[1] value set via addis
41324 operands[2] target register being loaded
41325 operands[3] D-form memory reference using operands[0].
41327 This is similar to the fusion introduced with power8, except it applies
41328 to both loads and stores and does not require the result register to be
41329 the same as the base register. At the moment, we only do this if the
41330 register set with addis is dead. */
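/* For example (illustrative), with a scratch register tmp that is
   dead after the sequence:
	addis tmp,base,const@ha
	lfd   f1,const@l(tmp)
   Unlike the power8 GPR load fusion above, the loaded register
   (here f1) need not be the same as tmp.  */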
41332 void
41333 expand_fusion_p9_load (rtx *operands)
41335 rtx tmp_reg = operands[0];
41336 rtx addis_value = operands[1];
41337 rtx target = operands[2];
41338 rtx orig_mem = operands[3];
41339 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
41340 enum rtx_code plus_or_lo_sum;
41341 machine_mode target_mode = GET_MODE (target);
41342 machine_mode extend_mode = target_mode;
41343 machine_mode ptr_mode = Pmode;
41344 enum rtx_code extend = UNKNOWN;
41346 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
41348 extend = GET_CODE (orig_mem);
41349 orig_mem = XEXP (orig_mem, 0);
41350 target_mode = GET_MODE (orig_mem);
41353 gcc_assert (MEM_P (orig_mem));
41355 orig_addr = XEXP (orig_mem, 0);
41356 plus_or_lo_sum = GET_CODE (orig_addr);
41357 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
41359 offset = XEXP (orig_addr, 1);
41360 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
41361 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
41363 if (extend != UNKNOWN)
41364 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
41366 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
41367 UNSPEC_FUSION_P9);
41369 set = gen_rtx_SET (target, new_mem);
41370 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
41371 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
41372 emit_insn (insn);
41374 return;
41377 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
41378 store sequence.
41380 The operands are:
41381 operands[0] register set with addis
41382 operands[1] value set via addis
41383 operands[2] target D-form memory being stored to
41384 operands[3] register being stored
41386 This is similar to the fusion introduced with power8, except it applies
41387 to both loads and stores and does not require the result register to be
41388 the same as the base register. At the moment, we only do this if the
41389 register set with addis is dead. */
41391 void
41392 expand_fusion_p9_store (rtx *operands)
41394 rtx tmp_reg = operands[0];
41395 rtx addis_value = operands[1];
41396 rtx orig_mem = operands[2];
41397 rtx src = operands[3];
41398 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
41399 enum rtx_code plus_or_lo_sum;
41400 machine_mode target_mode = GET_MODE (orig_mem);
41401 machine_mode ptr_mode = Pmode;
41403 gcc_assert (MEM_P (orig_mem));
41405 orig_addr = XEXP (orig_mem, 0);
41406 plus_or_lo_sum = GET_CODE (orig_addr);
41407 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
41409 offset = XEXP (orig_addr, 1);
41410 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
41411 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
41413 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
41414 UNSPEC_FUSION_P9);
41416 set = gen_rtx_SET (new_mem, new_src);
41417 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
41418 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
41419 emit_insn (insn);
41421 return;
41424 /* Return a string to fuse an addis instruction with a load using extended
41425 fusion. The address that is used is the logical address that was formed
41426 during peephole2: (lo_sum (high) (low-part))
41428 The code is complicated, so we call output_asm_insn directly, and just
41429 return "". */
41431 const char *
41432 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
41434 enum machine_mode mode = GET_MODE (reg);
41435 rtx hi;
41436 rtx lo;
41437 rtx addr;
41438 const char *load_string;
41439 int r;
41441 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
41443 mem = XEXP (mem, 0);
41444 mode = GET_MODE (mem);
41447 if (GET_CODE (reg) == SUBREG)
41449 gcc_assert (SUBREG_BYTE (reg) == 0);
41450 reg = SUBREG_REG (reg);
41453 if (!REG_P (reg))
41454 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
41456 r = REGNO (reg);
41457 if (FP_REGNO_P (r))
41459 if (mode == SFmode)
41460 load_string = "lfs";
41461 else if (mode == DFmode || mode == DImode)
41462 load_string = "lfd";
41463 else
41464 gcc_unreachable ();
41466 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
41468 if (mode == SFmode)
41469 load_string = "lxssp";
41470 else if (mode == DFmode || mode == DImode)
41471 load_string = "lxsd";
41472 else
41473 gcc_unreachable ();
41475 else if (INT_REGNO_P (r))
41477 switch (mode)
41479 case QImode:
41480 load_string = "lbz";
41481 break;
41482 case HImode:
41483 load_string = "lhz";
41484 break;
41485 case SImode:
41486 case SFmode:
41487 load_string = "lwz";
41488 break;
41489 case DImode:
41490 case DFmode:
41491 if (!TARGET_POWERPC64)
41492 gcc_unreachable ();
41493 load_string = "ld";
41494 break;
41495 default:
41496 gcc_unreachable ();
41499 else
41500 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
41502 if (!MEM_P (mem))
41503 fatal_insn ("emit_fusion_p9_load not MEM", mem);
41505 addr = XEXP (mem, 0);
41506 fusion_split_address (addr, &hi, &lo);
41508 /* Emit the addis instruction. */
41509 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
41511 /* Emit the D-form load instruction. */
41512 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
41514 return "";
41517 /* Return a string to fuse an addis instruction with a store using extended
41518 fusion. The address that is used is the logical address that was formed
41519 during peephole2: (lo_sum (high) (low-part))
41521 The code is complicated, so we call output_asm_insn directly, and just
41522 return "". */
41524 const char *
41525 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
41527 enum machine_mode mode = GET_MODE (reg);
41528 rtx hi;
41529 rtx lo;
41530 rtx addr;
41531 const char *store_string;
41532 int r;
41534 if (GET_CODE (reg) == SUBREG)
41536 gcc_assert (SUBREG_BYTE (reg) == 0);
41537 reg = SUBREG_REG (reg);
41540 if (!REG_P (reg))
41541 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
41543 r = REGNO (reg);
41544 if (FP_REGNO_P (r))
41546 if (mode == SFmode)
41547 store_string = "stfs";
41548 else if (mode == DFmode)
41549 store_string = "stfd";
41550 else
41551 gcc_unreachable ();
41553 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
41555 if (mode == SFmode)
41556 store_string = "stxssp";
41557 else if (mode == DFmode || mode == DImode)
41558 store_string = "stxsd";
41559 else
41560 gcc_unreachable ();
41562 else if (INT_REGNO_P (r))
41564 switch (mode)
41566 case QImode:
41567 store_string = "stb";
41568 break;
41569 case HImode:
41570 store_string = "sth";
41571 break;
41572 case SImode:
41573 case SFmode:
41574 store_string = "stw";
41575 break;
41576 case DImode:
41577 case DFmode:
41578 if (!TARGET_POWERPC64)
41579 gcc_unreachable ();
41580 store_string = "std";
41581 break;
41582 default:
41583 gcc_unreachable ();
41586 else
41587 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
41589 if (!MEM_P (mem))
41590 fatal_insn ("emit_fusion_p9_store not MEM", mem);
41592 addr = XEXP (mem, 0);
41593 fusion_split_address (addr, &hi, &lo);
41595 /* Emit the addis instruction. */
41596 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
41598 /* Emit the D-form store instruction. */
41599 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
41601 return "";
41605 /* Analyze vector computations and remove unnecessary doubleword
41606 swaps (xxswapdi instructions). This pass is performed only
41607 for little-endian VSX code generation.
41609 For this specific case, loads and stores of 4x32 and 2x64 vectors
41610 are inefficient. These are implemented using the lxvd2x and
41611 stxvd2x instructions, which invert the order of doublewords in
41612 a vector register. Thus the code generation inserts an xxswapdi
41613 after each such load, and prior to each such store. (For spill
41614 code after register assignment, an additional xxswapdi is inserted
41615 following each store in order to return a hard register to its
41616 unpermuted value.)
41618 The extra xxswapdi instructions reduce performance. This can be
41619 particularly bad for vectorized code. The purpose of this pass
41620 is to reduce the number of xxswapdi instructions required for
41621 correctness.
41623 The primary insight is that much code that operates on vectors
41624 does not care about the relative order of elements in a register,
41625 so long as the correct memory order is preserved. If we have
41626 a computation where all input values are provided by lxvd2x/xxswapdi
41627 sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
41628 and all intermediate computations are pure SIMD (independent of
41629 element order), then all the xxswapdi's associated with the loads
41630 and stores may be removed.
41632 This pass uses some of the infrastructure and logical ideas from
41633 the "web" pass in web.c. We create maximal webs of computations
41634 fitting the description above using union-find. Each such web is
41635 then optimized by removing its unnecessary xxswapdi instructions.
41637 The pass is placed prior to global optimization so that we can
41638 perform the optimization in the safest and simplest way possible;
41639 that is, by replacing each xxswapdi insn with a register copy insn.
41640 Subsequent forward propagation will remove copies where possible.
41642 There are some operations sensitive to element order for which we
41643 can still allow the operation, provided we modify those operations.
41644 These include CONST_VECTORs, for which we must swap the first and
41645 second halves of the constant vector; and SUBREGs, for which we
41646 must adjust the byte offset to account for the swapped doublewords.
41647 A remaining opportunity would be non-immediate-form splats, for
41648 which we should adjust the selected lane of the input. We should
41649 also make code generation adjustments for sum-across operations,
41650 since this is a common vectorizer reduction.
41652 Because we run prior to the first split, we can see loads and stores
41653 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
41654 vector loads and stores that have not yet been split into a permuting
41655 load/store and a swap. (One way this can happen is with a builtin
41656 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
41657 than deleting a swap, we convert the load/store into a permuting
41658 load/store (which effectively removes the swap). */
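/* For example (illustrative), a little-endian V4SI computation of
   the form
	lxvd2x  vs0,0,r9
	xxswapd vs0,vs0
	  ...element-order-independent operations...
	xxswapd vs1,vs1
	stxvd2x vs1,0,r10
   forms a single web in which both xxswapd instructions can be
   replaced by register copies and subsequently removed.  */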
41660 /* Notes on Permutes
41662 We do not currently handle computations that contain permutes. There
41663 is a general transformation that can be performed correctly, but it
41664 may introduce more expensive code than it replaces. To handle these
41665 would require a cost model to determine when to perform the optimization.
41666 This commentary records how this could be done if desired.
41668 The most general permute is something like this (example for V16QI):
41670 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
41671 (parallel [(const_int a0) (const_int a1)
41673 (const_int a14) (const_int a15)]))
41675 where a0,...,a15 are in [0,31] and select the elements from op1
41676 and op2 that are to appear in the result.
41678 Regardless of mode, we can convert the PARALLEL to a mask of 16
41679 byte-element selectors. Let's call this M, with M[i] representing
41680 the ith byte-element selector value. Then if we swap doublewords
41681 throughout the computation, we can get correct behavior by replacing
41682 M with M' as follows:
41684 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
41685 { ((M[i]+8)%16)+16 : M[i] in [16,31]
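   As a worked example, M = {8,...,15,16,...,23}, which selects the
   second doubleword of op1 followed by the first doubleword of op2,
   becomes M' = {0,...,7,24,...,31}, selecting the first doubleword
   of op1 followed by the second doubleword of op2.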
41687 This seems promising at first, since we are just replacing one mask
41688 with another. But certain masks are preferable to others. If M
41689 is a mask that matches a vmrghh pattern, for example, M' certainly
41690 will not. Instead of a single vmrghh, we would generate a load of
41691 M' and a vperm. So we would need to know how many xxswapd's we can
41692 remove as a result of this transformation to determine if it's
41693 profitable; ideally the logic would also be aware of all the
41694 special preferable masks.
41696 Another form of permute is an UNSPEC_VPERM, in which the mask is
41697 already in a register. In some cases, this mask may be a constant
41698 that we can discover with ud-chains, in which case the above
41699 transformation is ok. However, the common usage here is for the
41700 mask to be produced by an UNSPEC_LVSL, in which case the mask
41701 cannot be known at compile time. In such a case we would have to
41702 generate several instructions to compute M' as above at run time,
41703 and a cost model is needed again.
41705 However, when the mask M for an UNSPEC_VPERM is loaded from the
41706 constant pool, we can replace M with M' as above at no cost
41707 beyond adding a constant pool entry. */
41709 /* This is based on the union-find logic in web.c. web_entry_base is
41710 defined in df.h. */
41711 class swap_web_entry : public web_entry_base
41713 public:
41714 /* Pointer to the insn. */
41715 rtx_insn *insn;
41716 /* Set if insn contains a mention of a vector register. All other
41717 fields are undefined if this field is unset. */
41718 unsigned int is_relevant : 1;
41719 /* Set if insn is a load. */
41720 unsigned int is_load : 1;
41721 /* Set if insn is a store. */
41722 unsigned int is_store : 1;
41723 /* Set if insn is a doubleword swap. This can either be a register swap
41724 or a permuting load or store (test is_load and is_store for this). */
41725 unsigned int is_swap : 1;
41726 /* Set if the insn has a live-in use of a parameter register. */
41727 unsigned int is_live_in : 1;
41728 /* Set if the insn has a live-out def of a return register. */
41729 unsigned int is_live_out : 1;
41730 /* Set if the insn contains a subreg reference of a vector register. */
41731 unsigned int contains_subreg : 1;
41732 /* Set if the insn contains a 128-bit integer operand. */
41733 unsigned int is_128_int : 1;
41734 /* Set if this is a call-insn. */
41735 unsigned int is_call : 1;
41736 /* Set if this insn does not perform a vector operation for which
41737 element order matters, or if we know how to fix it up if it does.
41738 Undefined if is_swap is set. */
41739 unsigned int is_swappable : 1;
41740 /* A nonzero value indicates what kind of special handling for this
41741 insn is required if doublewords are swapped. Undefined if
41742 is_swappable is not set. */
41743 unsigned int special_handling : 4;
41744 /* Set if the web represented by this entry cannot be optimized. */
41745 unsigned int web_not_optimizable : 1;
41746 /* Set if this insn should be deleted. */
41747 unsigned int will_delete : 1;
41750 enum special_handling_values {
41751 SH_NONE = 0,
41752 SH_CONST_VECTOR,
41753 SH_SUBREG,
41754 SH_NOSWAP_LD,
41755 SH_NOSWAP_ST,
41756 SH_EXTRACT,
41757 SH_SPLAT,
41758 SH_XXPERMDI,
41759 SH_CONCAT,
41760 SH_VPERM
41763 /* Union INSN with all insns containing definitions that reach USE.
41764 Detect whether USE is live-in to the current function. */
41765 static void
41766 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
41768 struct df_link *link = DF_REF_CHAIN (use);
41770 if (!link)
41771 insn_entry[INSN_UID (insn)].is_live_in = 1;
41773 while (link)
41775 if (DF_REF_IS_ARTIFICIAL (link->ref))
41776 insn_entry[INSN_UID (insn)].is_live_in = 1;
41778 if (DF_REF_INSN_INFO (link->ref))
41780 rtx def_insn = DF_REF_INSN (link->ref);
41781 (void)unionfind_union (insn_entry + INSN_UID (insn),
41782 insn_entry + INSN_UID (def_insn));
41785 link = link->next;
41789 /* Union INSN with all insns containing uses reached from DEF.
41790 Detect whether DEF is live-out from the current function. */
41791 static void
41792 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
41794 struct df_link *link = DF_REF_CHAIN (def);
41796 if (!link)
41797 insn_entry[INSN_UID (insn)].is_live_out = 1;
41799 while (link)
41801 /* This could be an eh use or some other artificial use;
41802 we treat these all the same (killing the optimization). */
41803 if (DF_REF_IS_ARTIFICIAL (link->ref))
41804 insn_entry[INSN_UID (insn)].is_live_out = 1;
41806 if (DF_REF_INSN_INFO (link->ref))
41808 rtx use_insn = DF_REF_INSN (link->ref);
41809 (void)unionfind_union (insn_entry + INSN_UID (insn),
41810 insn_entry + INSN_UID (use_insn));
41813 link = link->next;
41817 /* Return 1 iff INSN is a load insn, including permuting loads that
41818 represent an lxvd2x instruction; else return 0. */
41819 static unsigned int
41820 insn_is_load_p (rtx insn)
41822 rtx body = PATTERN (insn);
41824 if (GET_CODE (body) == SET)
41826 if (GET_CODE (SET_SRC (body)) == MEM)
41827 return 1;
41829 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
41830 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
41831 return 1;
41833 return 0;
41836 if (GET_CODE (body) != PARALLEL)
41837 return 0;
41839 rtx set = XVECEXP (body, 0, 0);
41841 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
41842 return 1;
41844 return 0;
41847 /* Return 1 iff INSN is a store insn, including permuting stores that
41848 represent an stxvd2x instruction; else return 0. */
41849 static unsigned int
41850 insn_is_store_p (rtx insn)
41852 rtx body = PATTERN (insn);
41853 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
41854 return 1;
41855 if (GET_CODE (body) != PARALLEL)
41856 return 0;
41857 rtx set = XVECEXP (body, 0, 0);
41858 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
41859 return 1;
41860 return 0;
41863 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
41864 a permuting load, or a permuting store. */
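/* For example, in the V4SI case a swap has the form
	(set (reg:V4SI d)
	     (vec_select:V4SI (reg:V4SI s)
			      (parallel [(const_int 2) (const_int 3)
					 (const_int 0) (const_int 1)])))
   i.e. the selector lists the second half of the lanes followed by
   the first half.  */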
41865 static unsigned int
41866 insn_is_swap_p (rtx insn)
41868 rtx body = PATTERN (insn);
41869 if (GET_CODE (body) != SET)
41870 return 0;
41871 rtx rhs = SET_SRC (body);
41872 if (GET_CODE (rhs) != VEC_SELECT)
41873 return 0;
41874 rtx parallel = XEXP (rhs, 1);
41875 if (GET_CODE (parallel) != PARALLEL)
41876 return 0;
41877 unsigned int len = XVECLEN (parallel, 0);
41878 if (len != 2 && len != 4 && len != 8 && len != 16)
41879 return 0;
41880 for (unsigned int i = 0; i < len / 2; ++i)
41882 rtx op = XVECEXP (parallel, 0, i);
41883 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
41884 return 0;
41886 for (unsigned int i = len / 2; i < len; ++i)
41888 rtx op = XVECEXP (parallel, 0, i);
41889 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
41890 return 0;
41892 return 1;
41895 /* Return TRUE if insn is a swap fed by a load from the constant pool. */
41896 static bool
41897 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
41899 unsigned uid = INSN_UID (insn);
41900 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
41901 return false;
41903 /* Find the unique use in the swap and locate its def. If the def
41904 isn't unique, punt. */
41905 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41906 df_ref use;
41907 FOR_EACH_INSN_INFO_USE (use, insn_info)
41909 struct df_link *def_link = DF_REF_CHAIN (use);
41910 if (!def_link || def_link->next)
41911 return false;
41913 rtx def_insn = DF_REF_INSN (def_link->ref);
41914 unsigned uid2 = INSN_UID (def_insn);
41915 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
41916 return false;
41918 rtx body = PATTERN (def_insn);
41919 if (GET_CODE (body) != SET
41920 || GET_CODE (SET_SRC (body)) != VEC_SELECT
41921 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
41922 return false;
41924 rtx mem = XEXP (SET_SRC (body), 0);
41925 rtx base_reg = XEXP (mem, 0);
41927 df_ref base_use;
41928 insn_info = DF_INSN_INFO_GET (def_insn);
41929 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
41931 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
41932 continue;
41934 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
41935 if (!base_def_link || base_def_link->next)
41936 return false;
41938 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
41939 rtx tocrel_body = PATTERN (tocrel_insn);
41940 rtx base, offset;
41941 if (GET_CODE (tocrel_body) != SET)
41942 return false;
41943 /* There is an extra level of indirection for small/large
41944 code models. */
41945 rtx tocrel_expr = SET_SRC (tocrel_body);
41946 if (GET_CODE (tocrel_expr) == MEM)
41947 tocrel_expr = XEXP (tocrel_expr, 0);
41948 if (!toc_relative_expr_p (tocrel_expr, false))
41949 return false;
41950 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
41951 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
41952 return false;
41955 return true;
41958 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
41959 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
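/* The shape recognized is (PLUS case shown; SMIN/SMAX are analogous):
	(plus:V2DF
	  (vec_concat:V2DF
	    (vec_select:DF (reg:V2DF x) (parallel [(const_int 1)]))
	    (vec_select:DF (reg:V2DF x) (parallel [(const_int 0)])))
	  ...)
   i.e. the first operand is X with its doublewords interchanged.  */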
41960 static bool
41961 v2df_reduction_p (rtx op)
41963 if (GET_MODE (op) != V2DFmode)
41964 return false;
41966 enum rtx_code code = GET_CODE (op);
41967 if (code != PLUS && code != SMIN && code != SMAX)
41968 return false;
41970 rtx concat = XEXP (op, 0);
41971 if (GET_CODE (concat) != VEC_CONCAT)
41972 return false;
41974 rtx select0 = XEXP (concat, 0);
41975 rtx select1 = XEXP (concat, 1);
41976 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
41977 return false;
41979 rtx reg0 = XEXP (select0, 0);
41980 rtx reg1 = XEXP (select1, 0);
41981 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
41982 return false;
41984 rtx parallel0 = XEXP (select0, 1);
41985 rtx parallel1 = XEXP (select1, 1);
41986 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
41987 return false;
41989 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
41990 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
41991 return false;
41993 return true;
41996 /* Return 1 iff OP is an operand that will not be affected by having
41997 vector doublewords swapped in memory. */
41998 static unsigned int
41999 rtx_is_swappable_p (rtx op, unsigned int *special)
42001 enum rtx_code code = GET_CODE (op);
42002 int i, j;
42003 rtx parallel;
42005 switch (code)
42007 case LABEL_REF:
42008 case SYMBOL_REF:
42009 case CLOBBER:
42010 case REG:
42011 return 1;
42013 case VEC_CONCAT:
42014 case ASM_INPUT:
42015 case ASM_OPERANDS:
42016 return 0;
42018 case CONST_VECTOR:
42020 *special = SH_CONST_VECTOR;
42021 return 1;
42024 case VEC_DUPLICATE:
42025 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
42026 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
42027 it represents a vector splat for which we can do special
42028 handling. */
42029 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
42030 return 1;
42031 else if (REG_P (XEXP (op, 0))
42032 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
42033 /* This catches V2DF and V2DI splat, at a minimum. */
42034 return 1;
42035 else if (GET_CODE (XEXP (op, 0)) == TRUNCATE
42036 && REG_P (XEXP (XEXP (op, 0), 0))
42037 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
42038 /* This catches splat of a truncated value. */
42039 return 1;
42040 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
42041 /* If the duplicated item is from a select, defer to the select
42042 processing to see if we can change the lane for the splat. */
42043 return rtx_is_swappable_p (XEXP (op, 0), special);
42044 else
42045 return 0;
42047 case VEC_SELECT:
42048 /* A vec_extract operation is ok if we change the lane. */
42049 if (GET_CODE (XEXP (op, 0)) == REG
42050 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
42051 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
42052 && XVECLEN (parallel, 0) == 1
42053 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
42055 *special = SH_EXTRACT;
42056 return 1;
42058 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
42059 XXPERMDI is a swap operation, it will be identified by
42060 insn_is_swap_p and therefore we won't get here. */
42061 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
42062 && (GET_MODE (XEXP (op, 0)) == V4DFmode
42063 || GET_MODE (XEXP (op, 0)) == V4DImode)
42064 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
42065 && XVECLEN (parallel, 0) == 2
42066 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
42067 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
42069 *special = SH_XXPERMDI;
42070 return 1;
42072 else if (v2df_reduction_p (op))
42073 return 1;
42074 else
42075 return 0;
42077 case UNSPEC:
42079 /* Various operations are unsafe for this optimization, at least
42080 without significant additional work. Permutes are obviously
42081 problematic, as both the permute control vector and the ordering
42082 of the target values are invalidated by doubleword swapping.
42083 Vector pack and unpack modify the number of vector lanes.
42084 Merge-high/low will not operate correctly on swapped operands.
42085 Vector shifts across element boundaries are clearly uncool,
42086 as are vector select and concatenate operations. Vector
42087 sum-across instructions define one operand with a specific
42088 order-dependent element, so additional fixup code would be
42089 needed to make those work. Vector set and non-immediate-form
42090 vector splat are element-order sensitive. A few of these
42091 cases might be workable with special handling if required.
42092 Adding cost modeling would be appropriate in some cases. */
42093 int val = XINT (op, 1);
42094 switch (val)
42096 default:
42097 break;
42098 case UNSPEC_VMRGH_DIRECT:
42099 case UNSPEC_VMRGL_DIRECT:
42100 case UNSPEC_VPACK_SIGN_SIGN_SAT:
42101 case UNSPEC_VPACK_SIGN_UNS_SAT:
42102 case UNSPEC_VPACK_UNS_UNS_MOD:
42103 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
42104 case UNSPEC_VPACK_UNS_UNS_SAT:
42105 case UNSPEC_VPERM:
42106 case UNSPEC_VPERM_UNS:
42107 case UNSPEC_VPERMHI:
42108 case UNSPEC_VPERMSI:
42109 case UNSPEC_VPKPX:
42110 case UNSPEC_VSLDOI:
42111 case UNSPEC_VSLO:
42112 case UNSPEC_VSRO:
42113 case UNSPEC_VSUM2SWS:
42114 case UNSPEC_VSUM4S:
42115 case UNSPEC_VSUM4UBS:
42116 case UNSPEC_VSUMSWS:
42117 case UNSPEC_VSUMSWS_DIRECT:
42118 case UNSPEC_VSX_CONCAT:
42119 case UNSPEC_VSX_SET:
42120 case UNSPEC_VSX_SLDWI:
42121 case UNSPEC_VUNPACK_HI_SIGN:
42122 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
42123 case UNSPEC_VUNPACK_LO_SIGN:
42124 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
42125 case UNSPEC_VUPKHPX:
42126 case UNSPEC_VUPKHS_V4SF:
42127 case UNSPEC_VUPKHU_V4SF:
42128 case UNSPEC_VUPKLPX:
42129 case UNSPEC_VUPKLS_V4SF:
42130 case UNSPEC_VUPKLU_V4SF:
42131 case UNSPEC_VSX_CVDPSPN:
42132 case UNSPEC_VSX_CVSPDP:
42133 case UNSPEC_VSX_CVSPDPN:
42134 case UNSPEC_VSX_EXTRACT:
42135 case UNSPEC_VSX_VSLO:
42136 case UNSPEC_VSX_VEC_INIT:
42137 return 0;
42138 case UNSPEC_VSPLT_DIRECT:
42139 case UNSPEC_VSX_XXSPLTD:
42140 *special = SH_SPLAT;
42141 return 1;
42142 case UNSPEC_REDUC_PLUS:
42143 case UNSPEC_REDUC:
42144 return 1;
42148 default:
42149 break;
42152 const char *fmt = GET_RTX_FORMAT (code);
42153 int ok = 1;
42155 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42156 if (fmt[i] == 'e' || fmt[i] == 'u')
42158 unsigned int special_op = SH_NONE;
42159 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
42160 if (special_op == SH_NONE)
42161 continue;
42162 /* Ensure we never have two kinds of special handling
42163 for the same insn. */
42164 if (*special != SH_NONE && *special != special_op)
42165 return 0;
42166 *special = special_op;
42168 else if (fmt[i] == 'E')
42169 for (j = 0; j < XVECLEN (op, i); ++j)
42171 unsigned int special_op = SH_NONE;
42172 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
42173 if (special_op == SH_NONE)
42174 continue;
42175 /* Ensure we never have two kinds of special handling
42176 for the same insn. */
42177 if (*special != SH_NONE && *special != special_op)
42178 return 0;
42179 *special = special_op;
42182 return ok;
42185 /* Return 1 iff INSN is an operand that will not be affected by
42186 having vector doublewords swapped in memory (in which case
42187 *SPECIAL is unchanged), or that can be modified to be correct
42188 if vector doublewords are swapped in memory (in which case
42189 *SPECIAL is changed to a value indicating how). */
42190 static unsigned int
42191 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
42192 unsigned int *special)
42194 /* Calls are always bad. */
42195 if (GET_CODE (insn) == CALL_INSN)
42196 return 0;
42198 /* Loads and stores seen here are not permuting, but we can still
42199 fix them up by converting them to permuting ones. Exceptions:
42200 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
42201 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
42202 for the SET source. Also we must now make an exception for lvx
42203 and stvx when they are not in the UNSPEC_LVX/STVX form (with the
42204 explicit "& -16") since this leads to unrecognizable insns. */
42205 rtx body = PATTERN (insn);
42206 int i = INSN_UID (insn);
42208 if (insn_entry[i].is_load)
42210 if (GET_CODE (body) == SET)
42212 rtx rhs = SET_SRC (body);
42213 /* Even without a swap, the RHS might be a vec_select for, say,
42214 a byte-reversing load. */
42215 if (GET_CODE (rhs) != MEM)
42216 return 0;
42217 if (GET_CODE (XEXP (rhs, 0)) == AND)
42218 return 0;
42220 *special = SH_NOSWAP_LD;
42221 return 1;
42223 else
42224 return 0;
42227 if (insn_entry[i].is_store)
42229 if (GET_CODE (body) == SET
42230 && GET_CODE (SET_SRC (body)) != UNSPEC)
42232 rtx lhs = SET_DEST (body);
42233 /* Even without a swap, the LHS might be a vec_select for, say,
42234 a byte-reversing store. */
42235 if (GET_CODE (lhs) != MEM)
42236 return 0;
42237 if (GET_CODE (XEXP (lhs, 0)) == AND)
42238 return 0;
42240 *special = SH_NOSWAP_ST;
42241 return 1;
42243 else
42244 return 0;
42247 /* A convert to single precision can be left as is provided that
42248 all of its uses are in xxspltw instructions that splat BE element
42249 zero. */
42250 if (GET_CODE (body) == SET
42251 && GET_CODE (SET_SRC (body)) == UNSPEC
42252 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
42254 df_ref def;
42255 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42257 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42259 struct df_link *link = DF_REF_CHAIN (def);
42260 if (!link)
42261 return 0;
42263 for (; link; link = link->next) {
42264 rtx use_insn = DF_REF_INSN (link->ref);
42265 rtx use_body = PATTERN (use_insn);
42266 if (GET_CODE (use_body) != SET
42267 || GET_CODE (SET_SRC (use_body)) != UNSPEC
42268 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
42269 || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
42270 return 0;
42274 return 1;
42277 /* A concatenation of two doublewords is ok if we reverse the
42278 order of the inputs. */
42279 if (GET_CODE (body) == SET
42280 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
42281 && (GET_MODE (SET_SRC (body)) == V2DFmode
42282 || GET_MODE (SET_SRC (body)) == V2DImode))
42284 *special = SH_CONCAT;
42285 return 1;
42288 /* V2DF reductions are always swappable. */
42289 if (GET_CODE (body) == PARALLEL)
42291 rtx expr = XVECEXP (body, 0, 0);
42292 if (GET_CODE (expr) == SET
42293 && v2df_reduction_p (SET_SRC (expr)))
42294 return 1;
42297 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
42298 constant pool. */
42299 if (GET_CODE (body) == SET
42300 && GET_CODE (SET_SRC (body)) == UNSPEC
42301 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
42302 && XVECLEN (SET_SRC (body), 0) == 3
42303 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
42305 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
42306 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42307 df_ref use;
42308 FOR_EACH_INSN_INFO_USE (use, insn_info)
42309 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
42311 struct df_link *def_link = DF_REF_CHAIN (use);
42312 /* Punt if multiple definitions for this reg. */
42313 if (def_link && !def_link->next
42314 && const_load_sequence_p (insn_entry,
42315 DF_REF_INSN (def_link->ref)))
42317 *special = SH_VPERM;
42318 return 1;
42323 /* Otherwise check the operands for vector lane violations. */
42324 return rtx_is_swappable_p (body, special);
42327 enum chain_purpose { FOR_LOADS, FOR_STORES };
42329 /* Return true if the UD or DU chain headed by LINK is non-empty,
42330 and every entry on the chain references an insn that is a
42331 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
42332 register swap must have only permuting loads as reaching defs.
42333 If PURPOSE is FOR_STORES, each such register swap must have only
42334 register swaps or permuting stores as reached uses. */
42335 static bool
42336 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
42337 enum chain_purpose purpose)
42339 if (!link)
42340 return false;
42342 for (; link; link = link->next)
42344 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
42345 continue;
42347 if (DF_REF_IS_ARTIFICIAL (link->ref))
42348 return false;
42350 rtx reached_insn = DF_REF_INSN (link->ref);
42351 unsigned uid = INSN_UID (reached_insn);
42352 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
42354 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
42355 || insn_entry[uid].is_store)
42356 return false;
42358 if (purpose == FOR_LOADS)
42360 df_ref use;
42361 FOR_EACH_INSN_INFO_USE (use, insn_info)
42363 struct df_link *swap_link = DF_REF_CHAIN (use);
42365 while (swap_link)
42367 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
42368 return false;
42370 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
42371 unsigned uid2 = INSN_UID (swap_def_insn);
42373 /* Only permuting loads are allowed. */
42374 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
42375 return false;
42377 swap_link = swap_link->next;
42381 else if (purpose == FOR_STORES)
42383 df_ref def;
42384 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42386 struct df_link *swap_link = DF_REF_CHAIN (def);
42388 while (swap_link)
42390 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
42391 return false;
42393 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
42394 unsigned uid2 = INSN_UID (swap_use_insn);
42396 /* Permuting stores or register swaps are allowed. */
42397 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
42398 return false;
42400 swap_link = swap_link->next;
42406 return true;
42409 /* Mark the xxswapdi instructions associated with permuting loads and
42410 stores for removal. Note that we only flag them for deletion here,
42411 as there is a possibility of a swap being reached from multiple
42412 loads, etc. */
42413 static void
42414 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
42416 rtx insn = insn_entry[i].insn;
42417 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42419 if (insn_entry[i].is_load)
42421 df_ref def;
42422 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42424 struct df_link *link = DF_REF_CHAIN (def);
42426 /* We know by now that these are swaps, so we can delete
42427 them confidently. */
42428 while (link)
42430 rtx use_insn = DF_REF_INSN (link->ref);
42431 insn_entry[INSN_UID (use_insn)].will_delete = 1;
42432 link = link->next;
42436 else if (insn_entry[i].is_store)
42438 df_ref use;
42439 FOR_EACH_INSN_INFO_USE (use, insn_info)
42441 /* Ignore uses for addressability. */
42442 machine_mode mode = GET_MODE (DF_REF_REG (use));
42443 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
42444 continue;
42446 struct df_link *link = DF_REF_CHAIN (use);
42448 /* We know by now that these are swaps, so we can delete
42449 them confidently. */
42450 while (link)
42452 rtx def_insn = DF_REF_INSN (link->ref);
42453 insn_entry[INSN_UID (def_insn)].will_delete = 1;
42454 link = link->next;
42460 /* OP is either a CONST_VECTOR or an expression containing one.
42461 Swap the first half of the vector with the second in the first
42462 case. Recurse to find it in the second. */
42463 static void
42464 swap_const_vector_halves (rtx op)
42466 int i;
42467 enum rtx_code code = GET_CODE (op);
42468 if (code == CONST_VECTOR)
42470 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
42471 for (i = 0; i < half_units; ++i)
42473 rtx temp = CONST_VECTOR_ELT (op, i);
42474 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
42475 CONST_VECTOR_ELT (op, i + half_units) = temp;
42478 else
42480 int j;
42481 const char *fmt = GET_RTX_FORMAT (code);
42482 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42483 if (fmt[i] == 'e' || fmt[i] == 'u')
42484 swap_const_vector_halves (XEXP (op, i));
42485 else if (fmt[i] == 'E')
42486 for (j = 0; j < XVECLEN (op, i); ++j)
42487 swap_const_vector_halves (XVECEXP (op, i, j));
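/* For illustration only (not part of the original file): the half-swap
   performed by swap_const_vector_halves, restated over a plain array.
   A V4SI constant {0, 1, 2, 3} becomes {2, 3, 0, 1}: the two
   doublewords trade places while the order within each doubleword is
   preserved.  */
#if 0
static void
swap_halves (int *elts, int nunits)
{
  int half = nunits / 2;
  for (int i = 0; i < half; ++i)
    {
      int tmp = elts[i];
      elts[i] = elts[i + half];
      elts[i + half] = tmp;
    }
}
#endif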
42491 /* Find all subregs of a vector expression that perform a narrowing,
42492 and adjust the subreg index to account for doubleword swapping. */
42493 static void
42494 adjust_subreg_index (rtx op)
42496 enum rtx_code code = GET_CODE (op);
42497 if (code == SUBREG
42498 && (GET_MODE_SIZE (GET_MODE (op))
42499 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
42501 unsigned int index = SUBREG_BYTE (op);
42502 if (index < 8)
42503 index += 8;
42504 else
42505 index -= 8;
42506 SUBREG_BYTE (op) = index;
42509 const char *fmt = GET_RTX_FORMAT (code);
42510 int i, j;
42511 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42512 if (fmt[i] == 'e' || fmt[i] == 'u')
42513 adjust_subreg_index (XEXP (op, i));
42514 else if (fmt[i] == 'E')
42515 for (j = 0; j < XVECLEN (op, i); ++j)
42516 adjust_subreg_index (XVECEXP (op, i, j));
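/* For illustration only (not part of the original file): the
   SUBREG_BYTE remapping above, in isolation.  A narrowing subreg that
   selected bytes 0..7 of the original register value must select bytes
   8..15 once the doublewords are swapped, and vice versa.  */
#if 0
static unsigned int
remap_subreg_byte (unsigned int index)
{
  return index < 8 ? index + 8 : index - 8;
}
#endif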
42519 /* Convert the non-permuting load INSN to a permuting one. */
42520 static void
42521 permute_load (rtx_insn *insn)
42523 rtx body = PATTERN (insn);
42524 rtx mem_op = SET_SRC (body);
42525 rtx tgt_reg = SET_DEST (body);
42526 machine_mode mode = GET_MODE (tgt_reg);
42527 int n_elts = GET_MODE_NUNITS (mode);
42528 int half_elts = n_elts / 2;
42529 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
42530 int i, j;
42531 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
42532 XVECEXP (par, 0, i) = GEN_INT (j);
42533 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
42534 XVECEXP (par, 0, i) = GEN_INT (j);
42535 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
42536 SET_SRC (body) = sel;
42537 INSN_CODE (insn) = -1; /* Force re-recognition. */
42538 df_insn_rescan (insn);
42540 if (dump_file)
42541 fprintf (dump_file, "Replacing load %d with permuted load\n",
42542 INSN_UID (insn));
42545 /* Convert the non-permuting store INSN to a permuting one. */
42546 static void
42547 permute_store (rtx_insn *insn)
42549 rtx body = PATTERN (insn);
42550 rtx src_reg = SET_SRC (body);
42551 machine_mode mode = GET_MODE (src_reg);
42552 int n_elts = GET_MODE_NUNITS (mode);
42553 int half_elts = n_elts / 2;
42554 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
42555 int i, j;
42556 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
42557 XVECEXP (par, 0, i) = GEN_INT (j);
42558 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
42559 XVECEXP (par, 0, i) = GEN_INT (j);
42560 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
42561 SET_SRC (body) = sel;
42562 INSN_CODE (insn) = -1; /* Force re-recognition. */
42563 df_insn_rescan (insn);
42565 if (dump_file)
42566 fprintf (dump_file, "Replacing store %d with permuted store\n",
42567 INSN_UID (insn));
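/* For illustration only (not part of the original file): the selection
   vector that permute_load and permute_store build above, computed for
   an N_ELTS-lane vector.  For V4SI the PARALLEL holds {2, 3, 0, 1}, so
   the vec_select exchanges the two doublewords as part of the memory
   access.  */
#if 0
static void
build_swap_selection (int n_elts, int *sel)
{
  int half_elts = n_elts / 2;
  for (int i = 0; i < n_elts; ++i)
    sel[i] = (i + half_elts) % n_elts;
}
#endif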
42570 /* Given INSN that contains a vector extract operation, adjust the index
42571 of the extracted lane to account for the doubleword swap. */
42572 static void
42573 adjust_extract (rtx_insn *insn)
42575 rtx pattern = PATTERN (insn);
42576 if (GET_CODE (pattern) == PARALLEL)
42577 pattern = XVECEXP (pattern, 0, 0);
42578 rtx src = SET_SRC (pattern);
42579 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
42580 account for that. */
42581 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
42582 rtx par = XEXP (sel, 1);
42583 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
42584 int lane = INTVAL (XVECEXP (par, 0, 0));
42585 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
42586 XVECEXP (par, 0, 0) = GEN_INT (lane);
42587 INSN_CODE (insn) = -1; /* Force re-recognition. */
42588 df_insn_rescan (insn);
42590 if (dump_file)
42591 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
42594 /* Given INSN that contains a vector direct-splat operation, adjust the index
42595 of the source lane to account for the doubleword swap. */
42596 static void
42597 adjust_splat (rtx_insn *insn)
42599 rtx body = PATTERN (insn);
42600 rtx unspec = XEXP (body, 1);
42601 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
42602 int lane = INTVAL (XVECEXP (unspec, 0, 1));
42603 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
42604 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
42605 INSN_CODE (insn) = -1; /* Force re-recognition. */
42606 df_insn_rescan (insn);
42608 if (dump_file)
42609 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
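/* For illustration only (not part of the original file): the lane
   remapping shared by adjust_extract and adjust_splat above.  With the
   doublewords swapped, BE lane L of an N-lane vector is found at lane
   (L + N/2) mod N; for V4SI this maps 0<->2 and 1<->3.  */
#if 0
static int
remap_lane (int lane, int n_elts)
{
  int half_elts = n_elts / 2;
  return lane >= half_elts ? lane - half_elts : lane + half_elts;
}
#endif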
42612 /* Given INSN that contains an XXPERMDI operation (that is not a doubleword
42613 swap), reverse the order of the source operands and adjust the indices
42614 of the source lanes to account for doubleword reversal. */
42615 static void
42616 adjust_xxpermdi (rtx_insn *insn)
42618 rtx set = PATTERN (insn);
42619 rtx select = XEXP (set, 1);
42620 rtx concat = XEXP (select, 0);
42621 rtx src0 = XEXP (concat, 0);
42622 XEXP (concat, 0) = XEXP (concat, 1);
42623 XEXP (concat, 1) = src0;
42624 rtx parallel = XEXP (select, 1);
42625 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
42626 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
42627 int new_lane0 = 3 - lane1;
42628 int new_lane1 = 3 - lane0;
42629 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
42630 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
42631 INSN_CODE (insn) = -1; /* Force re-recognition. */
42632 df_insn_rescan (insn);
42634 if (dump_file)
42635 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
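/* For illustration only (not part of the original file): the index
   rewrite used by adjust_xxpermdi above.  Once the two VEC_CONCAT
   inputs are exchanged, each doubleword selector L in 0..3 refers to
   position 3 - L, and the two selectors also trade places, giving
   lane0' = 3 - lane1 and lane1' = 3 - lane0.  For example, selectors
   {0, 2} become {1, 3}.  */
#if 0
static void
remap_xxpermdi_lanes (int *lane0, int *lane1)
{
  int old_lane0 = *lane0;
  *lane0 = 3 - *lane1;
  *lane1 = 3 - old_lane0;
}
#endif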
42638 /* Given INSN that contains a VEC_CONCAT operation of two doublewords,
42639 reverse the order of those inputs. */
42640 static void
42641 adjust_concat (rtx_insn *insn)
42643 rtx set = PATTERN (insn);
42644 rtx concat = XEXP (set, 1);
42645 rtx src0 = XEXP (concat, 0);
42646 XEXP (concat, 0) = XEXP (concat, 1);
42647 XEXP (concat, 1) = src0;
42648 INSN_CODE (insn) = -1; /* Force re-recognition. */
42649 df_insn_rescan (insn);
42651 if (dump_file)
42652 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
42655 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
42656 constant pool to reflect swapped doublewords. */
42657 static void
42658 adjust_vperm (rtx_insn *insn)
42660 /* We previously determined that the UNSPEC_VPERM was fed by a
42661 swap of a swapping load of a TOC-relative constant pool symbol.
42662 Find the MEM in the swapping load and replace it with a MEM for
42663 the adjusted mask constant. */
42664 rtx set = PATTERN (insn);
42665 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
42667 /* Find the swap. */
42668 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42669 df_ref use;
42670 rtx_insn *swap_insn = 0;
42671 FOR_EACH_INSN_INFO_USE (use, insn_info)
42672 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
42674 struct df_link *def_link = DF_REF_CHAIN (use);
42675 gcc_assert (def_link && !def_link->next);
42676 swap_insn = DF_REF_INSN (def_link->ref);
42677 break;
42679 gcc_assert (swap_insn);
42681 /* Find the load. */
42682 insn_info = DF_INSN_INFO_GET (swap_insn);
42683 rtx_insn *load_insn = 0;
42684 FOR_EACH_INSN_INFO_USE (use, insn_info)
42686 struct df_link *def_link = DF_REF_CHAIN (use);
42687 gcc_assert (def_link && !def_link->next);
42688 load_insn = DF_REF_INSN (def_link->ref);
42689 break;
42691 gcc_assert (load_insn);
42693 /* Find the TOC-relative symbol access. */
42694 insn_info = DF_INSN_INFO_GET (load_insn);
42695 rtx_insn *tocrel_insn = 0;
42696 FOR_EACH_INSN_INFO_USE (use, insn_info)
42698 struct df_link *def_link = DF_REF_CHAIN (use);
42699 gcc_assert (def_link && !def_link->next);
42700 tocrel_insn = DF_REF_INSN (def_link->ref);
42701 break;
42703 gcc_assert (tocrel_insn);
42705 /* Find the embedded CONST_VECTOR. We have to call toc_relative_expr_p
42706 to set tocrel_base; otherwise it would be unnecessary as we've
42707 already established it will return true. */
42708 rtx base, offset;
42709 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
42710 /* There is an extra level of indirection for small/large code models. */
42711 if (GET_CODE (tocrel_expr) == MEM)
42712 tocrel_expr = XEXP (tocrel_expr, 0);
42713 if (!toc_relative_expr_p (tocrel_expr, false))
42714 gcc_unreachable ();
42715 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
42716 rtx const_vector = get_pool_constant (base);
42717 /* With the extra indirection, get_pool_constant will produce the
42718 real constant from the reg_equal expression, so get the real
42719 constant. */
42720 if (GET_CODE (const_vector) == SYMBOL_REF)
42721 const_vector = get_pool_constant (const_vector);
42722 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
42724 /* Create an adjusted mask from the initial mask. */
42725 unsigned int new_mask[16], i, val;
42726 for (i = 0; i < 16; ++i) {
42727 val = INTVAL (XVECEXP (const_vector, 0, i));
42728 if (val < 16)
42729 new_mask[i] = (val + 8) % 16;
42730 else
42731 new_mask[i] = ((val + 8) % 16) + 16;
42734 /* Create a new CONST_VECTOR and a MEM that references it. */
42735 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
42736 for (i = 0; i < 16; ++i)
42737 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
42738 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
42739 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
42740 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
42741 can't recognize. Force the SYMBOL_REF into a register. */
42742 if (!REG_P (XEXP (new_mem, 0))) {
42743 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
42744 XEXP (new_mem, 0) = base_reg;
42745 /* Move the newly created insn ahead of the load insn. */
42746 rtx_insn *force_insn = get_last_insn ();
42747 remove_insn (force_insn);
42748 rtx_insn *before_load_insn = PREV_INSN (load_insn);
42749 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
42750 df_insn_rescan (before_load_insn);
42751 df_insn_rescan (force_insn);
42754 /* Replace the MEM in the load instruction and rescan it. */
42755 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
42756 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
42757 df_insn_rescan (load_insn);
42759 if (dump_file)
42760 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
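/* For illustration only (not part of the original file): the mask
   adjustment computed by adjust_vperm above.  A vperm selector VAL
   picks byte VAL from the 32-byte concatenation of two source vectors;
   with each source's doublewords swapped, the byte it meant to pick
   now lives 8 positions away (mod 16) within the same source vector.  */
#if 0
static unsigned int
remap_vperm_selector (unsigned int val)
{
  return val < 16 ? (val + 8) % 16 : ((val + 8) % 16) + 16;
}
#endif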
42763 /* The insn described by INSN_ENTRY[I] can be swapped, but only
42764 with special handling. Take care of that here. */
42765 static void
42766 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
42768 rtx_insn *insn = insn_entry[i].insn;
42769 rtx body = PATTERN (insn);
42771 switch (insn_entry[i].special_handling)
42773 default:
42774 gcc_unreachable ();
42775 case SH_CONST_VECTOR:
42777 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
42778 gcc_assert (GET_CODE (body) == SET);
42779 rtx rhs = SET_SRC (body);
42780 swap_const_vector_halves (rhs);
42781 if (dump_file)
42782 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
42783 break;
42785 case SH_SUBREG:
42786 /* A subreg of the same size is already safe. For subregs that
42787 select a smaller portion of a reg, adjust the index for
42788 swapped doublewords. */
42789 adjust_subreg_index (body);
42790 if (dump_file)
42791 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
42792 break;
42793 case SH_NOSWAP_LD:
42794 /* Convert a non-permuting load to a permuting one. */
42795 permute_load (insn);
42796 break;
42797 case SH_NOSWAP_ST:
42798 /* Convert a non-permuting store to a permuting one. */
42799 permute_store (insn);
42800 break;
42801 case SH_EXTRACT:
42802 /* Change the lane on an extract operation. */
42803 adjust_extract (insn);
42804 break;
42805 case SH_SPLAT:
42806 /* Change the lane on a direct-splat operation. */
42807 adjust_splat (insn);
42808 break;
42809 case SH_XXPERMDI:
42810 /* Change the lanes on an XXPERMDI operation. */
42811 adjust_xxpermdi (insn);
42812 break;
42813 case SH_CONCAT:
42814 /* Reverse the order of a concatenation operation. */
42815 adjust_concat (insn);
42816 break;
42817 case SH_VPERM:
42818 /* Change the mask loaded from the constant pool for a VPERM. */
42819 adjust_vperm (insn);
42820 break;
42824 /* Find the insn from the Ith table entry, which is known to be a
42825 register swap Y = SWAP(X). Replace it with a copy Y = X. */
42826 static void
42827 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
42829 rtx_insn *insn = insn_entry[i].insn;
42830 rtx body = PATTERN (insn);
42831 rtx src_reg = XEXP (SET_SRC (body), 0);
42832 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
42833 rtx_insn *new_insn = emit_insn_before (copy, insn);
42834 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
42835 df_insn_rescan (new_insn);
42837 if (dump_file)
42839 unsigned int new_uid = INSN_UID (new_insn);
42840 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
42843 df_insn_delete (insn);
42844 remove_insn (insn);
42845 insn->set_deleted ();
42848 /* Dump the swap table to DUMP_FILE. */
42849 static void
42850 dump_swap_insn_table (swap_web_entry *insn_entry)
42852 int e = get_max_uid ();
42853 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
42855 for (int i = 0; i < e; ++i)
42856 if (insn_entry[i].is_relevant)
42858 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
42859 fprintf (dump_file, "%6d %6d ", i,
42860 pred_entry && pred_entry->insn
42861 ? INSN_UID (pred_entry->insn) : 0);
42862 if (insn_entry[i].is_load)
42863 fputs ("load ", dump_file);
42864 if (insn_entry[i].is_store)
42865 fputs ("store ", dump_file);
42866 if (insn_entry[i].is_swap)
42867 fputs ("swap ", dump_file);
42868 if (insn_entry[i].is_live_in)
42869 fputs ("live-in ", dump_file);
42870 if (insn_entry[i].is_live_out)
42871 fputs ("live-out ", dump_file);
42872 if (insn_entry[i].contains_subreg)
42873 fputs ("subreg ", dump_file);
42874 if (insn_entry[i].is_128_int)
42875 fputs ("int128 ", dump_file);
42876 if (insn_entry[i].is_call)
42877 fputs ("call ", dump_file);
42878 if (insn_entry[i].is_swappable)
42880 fputs ("swappable ", dump_file);
42881 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
42882 fputs ("special:constvec ", dump_file);
42883 else if (insn_entry[i].special_handling == SH_SUBREG)
42884 fputs ("special:subreg ", dump_file);
42885 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
42886 fputs ("special:load ", dump_file);
42887 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
42888 fputs ("special:store ", dump_file);
42889 else if (insn_entry[i].special_handling == SH_EXTRACT)
42890 fputs ("special:extract ", dump_file);
42891 else if (insn_entry[i].special_handling == SH_SPLAT)
42892 fputs ("special:splat ", dump_file);
42893 else if (insn_entry[i].special_handling == SH_XXPERMDI)
42894 fputs ("special:xxpermdi ", dump_file);
42895 else if (insn_entry[i].special_handling == SH_CONCAT)
42896 fputs ("special:concat ", dump_file);
42897 else if (insn_entry[i].special_handling == SH_VPERM)
42898 fputs ("special:vperm ", dump_file);
42900 if (insn_entry[i].web_not_optimizable)
42901 fputs ("unoptimizable ", dump_file);
42902 if (insn_entry[i].will_delete)
42903 fputs ("delete ", dump_file);
42904 fputs ("\n", dump_file);
42906 fputs ("\n", dump_file);
42909 /* Given ALIGN, an rtx of the form (and ADDR (const_int -16)), return
42910 a copy with ADDR canonicalized to (reg) or (plus (reg) (reg)).
42911 Always return a new copy to avoid problems with combine. */
42912 static rtx
42913 alignment_with_canonical_addr (rtx align)
42915 rtx canon;
42916 rtx addr = XEXP (align, 0);
42918 if (REG_P (addr))
42919 canon = addr;
42921 else if (GET_CODE (addr) == PLUS)
42923 rtx addrop0 = XEXP (addr, 0);
42924 rtx addrop1 = XEXP (addr, 1);
42926 if (!REG_P (addrop0))
42927 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
42929 if (!REG_P (addrop1))
42930 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
42932 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
42935 else
42936 canon = force_reg (GET_MODE (addr), addr);
42938 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
42941 /* Check whether INSN computes an alignment mask (an AND of an address
42942 with -16), and if so, return a fully-expanded rtx for the masking operation. */
42943 static rtx
42944 alignment_mask (rtx_insn *insn)
42946 rtx body = PATTERN (insn);
42948 if (GET_CODE (body) != SET
42949 || GET_CODE (SET_SRC (body)) != AND
42950 || !REG_P (XEXP (SET_SRC (body), 0)))
42951 return 0;
42953 rtx mask = XEXP (SET_SRC (body), 1);
42955 if (GET_CODE (mask) == CONST_INT)
42957 if (INTVAL (mask) == -16)
42958 return alignment_with_canonical_addr (SET_SRC (body));
42959 else
42960 return 0;
42963 if (!REG_P (mask))
42964 return 0;
42966 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42967 df_ref use;
42968 rtx real_mask = 0;
42970 FOR_EACH_INSN_INFO_USE (use, insn_info)
42972 if (!rtx_equal_p (DF_REF_REG (use), mask))
42973 continue;
42975 struct df_link *def_link = DF_REF_CHAIN (use);
42976 if (!def_link || def_link->next)
42977 return 0;
42979 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
42980 rtx const_body = PATTERN (const_insn);
42981 if (GET_CODE (const_body) != SET)
42982 return 0;
42984 real_mask = SET_SRC (const_body);
42986 if (GET_CODE (real_mask) != CONST_INT
42987 || INTVAL (real_mask) != -16)
42988 return 0;
42991 if (real_mask == 0)
42992 return 0;
42994 return alignment_with_canonical_addr (SET_SRC (body));
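/* For illustration only (not part of the original file), the shape
   alignment_mask recognizes, sketched in RTL (register numbers are
   made up):

     (set (reg:DI 200) (and:DI (reg:DI 100) (const_int -16)))

   or the same with the mask held in a register whose unique reaching
   def is a (const_int -16).  In both cases the returned rtx is the
   canonicalized (and (reg) (const_int -16)) or
   (and (plus (reg) (reg)) (const_int -16)).  */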
42997 /* Given INSN that's a load or store based at BASE_REG, look for a
42998 feeding computation that aligns its address on a 16-byte boundary. */
42999 static rtx
43000 find_alignment_op (rtx_insn *insn, rtx base_reg)
43002 df_ref base_use;
43003 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43004 rtx and_operation = 0;
43006 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
43008 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
43009 continue;
43011 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
43012 if (!base_def_link || base_def_link->next)
43013 break;
43015 /* With stack-protector code enabled, and possibly in other
43016 circumstances, there may not be an associated insn for
43017 the def. */
43018 if (DF_REF_IS_ARTIFICIAL (base_def_link->ref))
43019 break;
43021 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
43022 and_operation = alignment_mask (and_insn);
43023 if (and_operation != 0)
43024 break;
43027 return and_operation;
43030 struct del_info { bool replace; rtx_insn *replace_insn; };
43032 /* If INSN is the load for an lvx pattern, put it in canonical form. */
43033 static void
43034 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
43036 rtx body = PATTERN (insn);
43037 gcc_assert (GET_CODE (body) == SET
43038 && GET_CODE (SET_SRC (body)) == VEC_SELECT
43039 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
43041 rtx mem = XEXP (SET_SRC (body), 0);
43042 rtx base_reg = XEXP (mem, 0);
43044 rtx and_operation = find_alignment_op (insn, base_reg);
43046 if (and_operation != 0)
43048 df_ref def;
43049 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43050 FOR_EACH_INSN_INFO_DEF (def, insn_info)
43052 struct df_link *link = DF_REF_CHAIN (def);
43053 if (!link || link->next)
43054 break;
43056 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
43057 if (!insn_is_swap_p (swap_insn)
43058 || insn_is_load_p (swap_insn)
43059 || insn_is_store_p (swap_insn))
43060 break;
43062 /* Expected lvx pattern found. Change the swap to
43063 a copy, and propagate the AND operation into the
43064 load. */
43065 to_delete[INSN_UID (swap_insn)].replace = true;
43066 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
43068 XEXP (mem, 0) = and_operation;
43069 SET_SRC (body) = mem;
43070 INSN_CODE (insn) = -1; /* Force re-recognition. */
43071 df_insn_rescan (insn);
43073 if (dump_file)
43074 fprintf (dump_file, "lvx opportunity found at %d\n",
43075 INSN_UID (insn));
43080 /* If INSN is the store for an stvx pattern, put it in canonical form. */
43081 static void
43082 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
43084 rtx body = PATTERN (insn);
43085 gcc_assert (GET_CODE (body) == SET
43086 && GET_CODE (SET_DEST (body)) == MEM
43087 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
43088 rtx mem = SET_DEST (body);
43089 rtx base_reg = XEXP (mem, 0);
43091 rtx and_operation = find_alignment_op (insn, base_reg);
43093 if (and_operation != 0)
43095 rtx src_reg = XEXP (SET_SRC (body), 0);
43096 df_ref src_use;
43097 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43098 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
43100 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
43101 continue;
43103 struct df_link *link = DF_REF_CHAIN (src_use);
43104 if (!link || link->next)
43105 break;
43107 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
43108 if (!insn_is_swap_p (swap_insn)
43109 || insn_is_load_p (swap_insn)
43110 || insn_is_store_p (swap_insn))
43111 break;
43113 /* Expected stvx pattern found. Change the swap to
43114 a copy, and propagate the AND operation into the
43115 store. */
43116 to_delete[INSN_UID (swap_insn)].replace = true;
43117 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
43119 XEXP (mem, 0) = and_operation;
43120 SET_SRC (body) = src_reg;
43121 INSN_CODE (insn) = -1; /* Force re-recognition. */
43122 df_insn_rescan (insn);
43124 if (dump_file)
43125 fprintf (dump_file, "stvx opportunity found at %d\n",
43126 INSN_UID (insn));
43131 /* Look for patterns created from builtin lvx and stvx calls, and
43132 canonicalize them to be properly recognized as such. */
43133 static void
43134 recombine_lvx_stvx_patterns (function *fun)
43136 int i;
43137 basic_block bb;
43138 rtx_insn *insn;
43140 int num_insns = get_max_uid ();
43141 del_info *to_delete = XCNEWVEC (del_info, num_insns);
43143 FOR_ALL_BB_FN (bb, fun)
43144 FOR_BB_INSNS (bb, insn)
43146 if (!NONDEBUG_INSN_P (insn))
43147 continue;
43149 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
43150 recombine_lvx_pattern (insn, to_delete);
43151 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
43152 recombine_stvx_pattern (insn, to_delete);
43155 /* Turning swaps into copies is delayed until now, to avoid problems
43156 with deleting instructions during the insn walk. */
43157 for (i = 0; i < num_insns; i++)
43158 if (to_delete[i].replace)
43160 rtx swap_body = PATTERN (to_delete[i].replace_insn);
43161 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
43162 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
43163 rtx_insn *new_insn = emit_insn_before (copy,
43164 to_delete[i].replace_insn);
43165 set_block_for_insn (new_insn,
43166 BLOCK_FOR_INSN (to_delete[i].replace_insn));
43167 df_insn_rescan (new_insn);
43168 df_insn_delete (to_delete[i].replace_insn);
43169 remove_insn (to_delete[i].replace_insn);
43170 to_delete[i].replace_insn->set_deleted ();
43173 free (to_delete);
43176 /* Main entry point for this pass. */
43177 unsigned int
43178 rs6000_analyze_swaps (function *fun)
43180 swap_web_entry *insn_entry;
43181 basic_block bb;
43182 rtx_insn *insn, *curr_insn = 0;
43184 /* Dataflow analysis for use-def chains. */
43185 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
43186 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
43187 df_analyze ();
43188 df_set_flags (DF_DEFER_INSN_RESCAN);
43190 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
43191 recombine_lvx_stvx_patterns (fun);
43193 /* Allocate structure to represent webs of insns. */
43194 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
43196 /* Walk the insns to gather basic data. */
43197 FOR_ALL_BB_FN (bb, fun)
43198 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
43200 unsigned int uid = INSN_UID (insn);
43201 if (NONDEBUG_INSN_P (insn))
43203 insn_entry[uid].insn = insn;
43205 if (GET_CODE (insn) == CALL_INSN)
43206 insn_entry[uid].is_call = 1;
43208 /* Walk the uses and defs to see if we mention vector regs.
43209 Record any constraints on optimization of such mentions. */
43210 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43211 df_ref mention;
43212 FOR_EACH_INSN_INFO_USE (mention, insn_info)
43214 /* We use DF_REF_REAL_REG here to get inside any subregs. */
43215 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
43217 /* If a use gets its value from a call insn, it will be
43218 a hard register and will look like (reg:V4SI 3 3).
43219 The df analysis creates two mentions for GPR3 and GPR4,
43220 both DImode. We must recognize this and treat it as a
43221 vector mention to ensure the call is unioned with this
43222 use. */
43223 if (mode == DImode && DF_REF_INSN_INFO (mention))
43225 rtx feeder = DF_REF_INSN (mention);
43226 /* FIXME: It is pretty hard to get from the df mention
43227 to the mode of the use in the insn. We arbitrarily
43228 pick a vector mode here, even though the use might
43229 be a real DImode. We can be too conservative
43230 (create a web larger than necessary) because of
43231 this, so consider eventually fixing this. */
43232 if (GET_CODE (feeder) == CALL_INSN)
43233 mode = V4SImode;
43236 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
43238 insn_entry[uid].is_relevant = 1;
43239 if (mode == TImode || mode == V1TImode
43240 || FLOAT128_VECTOR_P (mode))
43241 insn_entry[uid].is_128_int = 1;
43242 if (DF_REF_INSN_INFO (mention))
43243 insn_entry[uid].contains_subreg
43244 = !rtx_equal_p (DF_REF_REG (mention),
43245 DF_REF_REAL_REG (mention));
43246 union_defs (insn_entry, insn, mention);
43249 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
43251 /* We use DF_REF_REAL_REG here to get inside any subregs. */
43252 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
43254 /* If we're loading up a hard vector register for a call,
43255 it looks like (set (reg:V4SI 9 9) (...)). The df
43256 analysis creates two mentions for GPR9 and GPR10, both
43257 DImode. So relying on the mode from the mentions
43258 isn't sufficient to ensure we union the call into the
43259 web with the parameter setup code. */
43260 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
43261 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
43262 mode = GET_MODE (SET_DEST (PATTERN (insn)));
43264 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
43266 insn_entry[uid].is_relevant = 1;
43267 if (mode == TImode || mode == V1TImode
43268 || FLOAT128_VECTOR_P (mode))
43269 insn_entry[uid].is_128_int = 1;
43270 if (DF_REF_INSN_INFO (mention))
43271 insn_entry[uid].contains_subreg
43272 = !rtx_equal_p (DF_REF_REG (mention),
43273 DF_REF_REAL_REG (mention));
43274 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
43275 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
43276 insn_entry[uid].is_live_out = 1;
43277 union_uses (insn_entry, insn, mention);
43281 if (insn_entry[uid].is_relevant)
43283 /* Determine if this is a load or store. */
43284 insn_entry[uid].is_load = insn_is_load_p (insn);
43285 insn_entry[uid].is_store = insn_is_store_p (insn);
43287 /* Determine if this is a doubleword swap. If not,
43288 determine whether it can legally be swapped. */
43289 if (insn_is_swap_p (insn))
43290 insn_entry[uid].is_swap = 1;
43291 else
43293 unsigned int special = SH_NONE;
43294 insn_entry[uid].is_swappable
43295 = insn_is_swappable_p (insn_entry, insn, &special);
43296 if (special != SH_NONE && insn_entry[uid].contains_subreg)
43297 insn_entry[uid].is_swappable = 0;
43298 else if (special != SH_NONE)
43299 insn_entry[uid].special_handling = special;
43300 else if (insn_entry[uid].contains_subreg)
43301 insn_entry[uid].special_handling = SH_SUBREG;
43307 if (dump_file)
43309 fprintf (dump_file, "\nSwap insn entry table when first built\n");
43310 dump_swap_insn_table (insn_entry);
43313 /* Record unoptimizable webs. */
43314 unsigned e = get_max_uid (), i;
43315 for (i = 0; i < e; ++i)
43317 if (!insn_entry[i].is_relevant)
43318 continue;
43320 swap_web_entry *root
43321 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
43323 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
43324 || (insn_entry[i].contains_subreg
43325 && insn_entry[i].special_handling != SH_SUBREG)
43326 || insn_entry[i].is_128_int || insn_entry[i].is_call
43327 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
43328 root->web_not_optimizable = 1;
43330 /* If we have loads or stores that aren't permuting then the
43331 optimization isn't appropriate. */
43332 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
43333 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
43334 root->web_not_optimizable = 1;
43336 /* If we have permuting loads or stores that are not accompanied
43337 by a register swap, the optimization isn't appropriate. */
43338 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
43340 rtx insn = insn_entry[i].insn;
43341 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43342 df_ref def;
43344 FOR_EACH_INSN_INFO_DEF (def, insn_info)
43346 struct df_link *link = DF_REF_CHAIN (def);
43348 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
43350 root->web_not_optimizable = 1;
43351 break;
43355 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
43357 rtx insn = insn_entry[i].insn;
43358 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43359 df_ref use;
43361 FOR_EACH_INSN_INFO_USE (use, insn_info)
43363 struct df_link *link = DF_REF_CHAIN (use);
43365 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
43367 root->web_not_optimizable = 1;
43368 break;
43374 if (dump_file)
43376 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
43377 dump_swap_insn_table (insn_entry);
43380 /* For each load and store in an optimizable web (which implies
43381 the loads and stores are permuting), find the associated
43382 register swaps and mark them for removal. Due to various
43383 optimizations we may mark the same swap more than once. Also
43384 perform special handling for swappable insns that require it. */
43385 for (i = 0; i < e; ++i)
43386 if ((insn_entry[i].is_load || insn_entry[i].is_store)
43387 && insn_entry[i].is_swap)
43389 swap_web_entry* root_entry
43390 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
43391 if (!root_entry->web_not_optimizable)
43392 mark_swaps_for_removal (insn_entry, i);
43394 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
43396 swap_web_entry* root_entry
43397 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
43398 if (!root_entry->web_not_optimizable)
43399 handle_special_swappables (insn_entry, i);
43402 /* Now delete the swaps marked for removal. */
43403 for (i = 0; i < e; ++i)
43404 if (insn_entry[i].will_delete)
43405 replace_swap_with_copy (insn_entry, i);
43407 /* Clean up. */
43408 free (insn_entry);
43409 return 0;
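/* For illustration only (not part of the original file): the net
   effect of this pass on little-endian POWER8 code, sketched at the
   assembly level (a hand-written example, not compiler output):

     before:  lxvd2x 0,0,3      # load; doublewords arrive swapped
              xxpermdi 0,0,0,2  # swap them back
              ...compute...
              xxpermdi 0,0,0,2  # swap again before the store
              stxvd2x 0,0,4     # store; doublewords leave swapped

     after:   lxvd2x 0,0,3
              ...compute on swapped doublewords...
              stxvd2x 0,0,4

   The xxpermdi pair is deleted when every insn in the web is
   lane-insensitive or can be fixed up by the special handlers above.  */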
43412 const pass_data pass_data_analyze_swaps =
43414 RTL_PASS, /* type */
43415 "swaps", /* name */
43416 OPTGROUP_NONE, /* optinfo_flags */
43417 TV_NONE, /* tv_id */
43418 0, /* properties_required */
43419 0, /* properties_provided */
43420 0, /* properties_destroyed */
43421 0, /* todo_flags_start */
43422 TODO_df_finish, /* todo_flags_finish */
43425 class pass_analyze_swaps : public rtl_opt_pass
43427 public:
43428 pass_analyze_swaps (gcc::context *ctxt)
43429 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
43432 /* opt_pass methods: */
43433 virtual bool gate (function *)
43435 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
43436 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
43439 virtual unsigned int execute (function *fun)
43441 return rs6000_analyze_swaps (fun);
43444 opt_pass *clone ()
43446 return new pass_analyze_swaps (m_ctxt);
43449 }; // class pass_analyze_swaps
43451 rtl_opt_pass *
43452 make_pass_analyze_swaps (gcc::context *ctxt)
43454 return new pass_analyze_swaps (ctxt);
43457 #ifdef RS6000_GLIBC_ATOMIC_FENV
43458 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
43459 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
43460 #endif
43462 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
43464 static void
43465 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
43467 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
43469 #ifdef RS6000_GLIBC_ATOMIC_FENV
43470 if (atomic_hold_decl == NULL_TREE)
43472 atomic_hold_decl
43473 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43474 get_identifier ("__atomic_feholdexcept"),
43475 build_function_type_list (void_type_node,
43476 double_ptr_type_node,
43477 NULL_TREE));
43478 TREE_PUBLIC (atomic_hold_decl) = 1;
43479 DECL_EXTERNAL (atomic_hold_decl) = 1;
43482 if (atomic_clear_decl == NULL_TREE)
43484 atomic_clear_decl
43485 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43486 get_identifier ("__atomic_feclearexcept"),
43487 build_function_type_list (void_type_node,
43488 NULL_TREE));
43489 TREE_PUBLIC (atomic_clear_decl) = 1;
43490 DECL_EXTERNAL (atomic_clear_decl) = 1;
43493 tree const_double = build_qualified_type (double_type_node,
43494 TYPE_QUAL_CONST);
43495 tree const_double_ptr = build_pointer_type (const_double);
43496 if (atomic_update_decl == NULL_TREE)
43498 atomic_update_decl
43499 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43500 get_identifier ("__atomic_feupdateenv"),
43501 build_function_type_list (void_type_node,
43502 const_double_ptr,
43503 NULL_TREE));
43504 TREE_PUBLIC (atomic_update_decl) = 1;
43505 DECL_EXTERNAL (atomic_update_decl) = 1;
43508 tree fenv_var = create_tmp_var_raw (double_type_node);
43509 TREE_ADDRESSABLE (fenv_var) = 1;
43510 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
43512 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
43513 *clear = build_call_expr (atomic_clear_decl, 0);
43514 *update = build_call_expr (atomic_update_decl, 1,
43515 fold_convert (const_double_ptr, fenv_addr));
43516 #endif
43517 return;
43520 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
43521 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
43522 tree call_mffs = build_call_expr (mffs, 0);
42524 /* Generates the equivalent of feholdexcept (&fenv_var):
42526 fenv_var = __builtin_mffs ();
42527 double fenv_hold;
42528 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
42529 __builtin_mtfsf (0xff, fenv_hold); */
43531 /* Mask to clear everything except for the rounding modes and non-IEEE
43532 arithmetic flag. */
43533 const unsigned HOST_WIDE_INT hold_exception_mask =
43534 HOST_WIDE_INT_C (0xffffffff00000007);
43536 tree fenv_var = create_tmp_var_raw (double_type_node);
43538 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
43540 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
43541 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
43542 build_int_cst (uint64_type_node,
43543 hold_exception_mask));
43545 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43546 fenv_llu_and);
43548 tree hold_mtfsf = build_call_expr (mtfsf, 2,
43549 build_int_cst (unsigned_type_node, 0xff),
43550 fenv_hold_mtfsf);
43552 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
43554 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
43556 double fenv_clear = __builtin_mffs ();
42557 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
43558 __builtin_mtfsf (0xff, fenv_clear); */
42560 /* Mask to clear the entire low word of the FPSCR: all exception
42561 flags and enable bits. */
43562 const unsigned HOST_WIDE_INT clear_exception_mask =
43563 HOST_WIDE_INT_C (0xffffffff00000000);
43565 tree fenv_clear = create_tmp_var_raw (double_type_node);
43567 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
43569 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
43570 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
43571 fenv_clean_llu,
43572 build_int_cst (uint64_type_node,
43573 clear_exception_mask));
43575 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43576 fenv_clear_llu_and);
43578 tree clear_mtfsf = build_call_expr (mtfsf, 2,
43579 build_int_cst (unsigned_type_node, 0xff),
43580 fenv_clear_mtfsf);
43582 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
42584 /* Generates the equivalent of feupdateenv (&fenv_var):
42586 double old_fenv = __builtin_mffs ();
42587 double fenv_update;
42588 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
42589 (*(uint64_t*)&fenv_var & 0x1ff80fff);
42590 __builtin_mtfsf (0xff, fenv_update); */
43592 const unsigned HOST_WIDE_INT update_exception_mask =
43593 HOST_WIDE_INT_C (0xffffffff1fffff00);
43594 const unsigned HOST_WIDE_INT new_exception_mask =
43595 HOST_WIDE_INT_C (0x1ff80fff);
43597 tree old_fenv = create_tmp_var_raw (double_type_node);
43598 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
43600 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
43601 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
43602 build_int_cst (uint64_type_node,
43603 update_exception_mask));
43605 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
43606 build_int_cst (uint64_type_node,
43607 new_exception_mask));
43609 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
43610 old_llu_and, new_llu_and);
43612 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43613 new_llu_mask);
43615 tree update_mtfsf = build_call_expr (mtfsf, 2,
43616 build_int_cst (unsigned_type_node, 0xff),
43617 fenv_update_mtfsf);
43619 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
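/* For illustration only (not part of the original file): the three
   FPSCR maskings generated above, restated over a 64-bit FPSCR image.  */
#if 0
#include <stdint.h>

/* feholdexcept: keep the rounding modes and the non-IEEE bit.  */
static uint64_t
fenv_hold_mask (uint64_t fpscr)
{
  return fpscr & UINT64_C (0xffffffff00000007);
}

/* feclearexcept (FE_ALL_EXCEPT): wipe the whole low word.  */
static uint64_t
fenv_clear_mask (uint64_t fpscr)
{
  return fpscr & UINT64_C (0xffffffff00000000);
}

/* feupdateenv: merge the saved environment into the current FPSCR.  */
static uint64_t
fenv_update_mask (uint64_t current, uint64_t saved)
{
  return (current & UINT64_C (0xffffffff1fffff00))
	 | (saved & UINT64_C (0x1ff80fff));
}
#endif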
43622 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
43624 static bool
43625 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
43626 optimization_type opt_type)
43628 switch (op)
43630 case rsqrt_optab:
43631 return (opt_type == OPTIMIZE_FOR_SPEED
43632 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
43634 default:
43635 return true;
43639 struct gcc_target targetm = TARGET_INITIALIZER;
43641 #include "gt-rs6000.h"