/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "memmodel.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "cfgloop.h"
31 #include "df.h"
32 #include "tm_p.h"
33 #include "stringpool.h"
34 #include "attribs.h"
35 #include "expmed.h"
36 #include "optabs.h"
37 #include "regs.h"
38 #include "ira.h"
39 #include "recog.h"
40 #include "cgraph.h"
41 #include "diagnostic-core.h"
42 #include "insn-attr.h"
43 #include "flags.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "print-tree.h"
49 #include "varasm.h"
50 #include "explow.h"
51 #include "expr.h"
52 #include "output.h"
53 #include "dbxout.h"
54 #include "common/common-target.h"
55 #include "langhooks.h"
56 #include "reload.h"
57 #include "sched-int.h"
58 #include "gimplify.h"
59 #include "gimple-fold.h"
60 #include "gimple-iterator.h"
61 #include "gimple-ssa.h"
62 #include "gimple-walk.h"
63 #include "intl.h"
64 #include "params.h"
65 #include "tm-constrs.h"
66 #include "tree-vectorizer.h"
67 #include "target-globals.h"
68 #include "builtins.h"
69 #include "context.h"
70 #include "tree-pass.h"
71 #include "except.h"
72 #if TARGET_XCOFF
73 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
74 #endif
75 #if TARGET_MACHO
76 #include "gstab.h" /* for N_SLINE */
77 #endif
78 #include "case-cfn-macros.h"
79 #include "ppc-auxv.h"
81 /* This file should be included last. */
82 #include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include powerpcspe.h also include machmode.h.  */
unsigned rs6000_pmode;
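/* A hedged sketch of the expected initialization (done in the option
   override code, which is not part of this excerpt):

     rs6000_pmode = (unsigned) (TARGET_64BIT ? DImode : SImode);

   i.e. DImode pointers on 64-bit targets and SImode on 32-bit ones.  */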
/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 }
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in powerpcspe-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in powerpcspe-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
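/* For example, given the enum ordering above, IS_STD_REG_TYPE
   (VSX_REG_TYPE) is true because VSX_REG_TYPE lies between GPR_REG_TYPE
   and FPR_REG_TYPE, while IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false
   since GPR_REG_TYPE precedes VSX_REG_TYPE.  */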
/* Register classes we care about in secondary reload or in the legitimate
   address check.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
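/* As an illustrative composition of these bits: a mode that is valid in
   a register class and supports both reg+reg and reg+offset addressing
   there would carry

     RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET

   i.e. 0x01 | 0x04 | 0x08 == 0x0d in its addr_mask entry.  */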
/* Per-register-type reload insns and masks of the valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
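/* This predicate is meant to be used as a guard in the scheduler
   descriptions; a hedged sketch of such a use (the insn reservation
   names are illustrative only, not taken from the real .md files):

     (define_bypass 1 "example-store" "example-load"
		    "rs6000_store_data_bypass_p")

   i.e. the shorter bypass latency only applies when the dependence
   feeds the stored value rather than the address of the store.  */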
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
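/* The quad-offset restriction presumably corresponds to the ISA 3.0
   DQ-form loads/stores (e.g. lxv/stxv), whose displacement field only
   encodes multiples of 16; RELOAD_REG_QUAD_OFFSET tracks the modes that
   are limited this way.  */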
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
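/* A note on units: COSTS_N_INSNS is the standard rtl.h scaling macro,
   COSTS_N_INSNS (N) == (N) * 4, so every entry below is expressed in
   quarter-insn units and a plain add costs COSTS_N_INSNS (1).  */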
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "powerpcspe-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
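/* The block above is a classic X-macro expansion: each RS6000_BUILTIN_*
   macro is temporarily defined to emit one { NAME, ICODE, MASK, ATTR }
   initializer, powerpcspe-builtin.def is included to stamp out the whole
   rs6000_builtin_info array, and the macros are #undef'd again so other
   tables can redefine them with a different shape.  */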
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);


static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr",
      /* SPE High registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6", "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
  "%rh8",  "%rh9",  "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
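/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000 >> 0, i.e. %v0 occupies the most significant bit of the
   VRSAVE mask, and each following AltiVec register is one bit to the
   right, down to %v31 in the least significant bit.  */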
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1702 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1703 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1705 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1706 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1707 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1708 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1709 rs6000_builtin_support_vector_misalignment
1710 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1711 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1712 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1713 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1714 rs6000_builtin_vectorization_cost
1715 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1716 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1717 rs6000_preferred_simd_mode
1718 #undef TARGET_VECTORIZE_INIT_COST
1719 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1720 #undef TARGET_VECTORIZE_ADD_STMT_COST
1721 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1722 #undef TARGET_VECTORIZE_FINISH_COST
1723 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1724 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1725 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1727 #undef TARGET_INIT_BUILTINS
1728 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1729 #undef TARGET_BUILTIN_DECL
1730 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1732 #undef TARGET_FOLD_BUILTIN
1733 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1734 #undef TARGET_GIMPLE_FOLD_BUILTIN
1735 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1737 #undef TARGET_EXPAND_BUILTIN
1738 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1740 #undef TARGET_MANGLE_TYPE
1741 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1743 #undef TARGET_INIT_LIBFUNCS
1744 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1746 #if TARGET_MACHO
1747 #undef TARGET_BINDS_LOCAL_P
1748 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1749 #endif
1751 #undef TARGET_MS_BITFIELD_LAYOUT_P
1752 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1754 #undef TARGET_ASM_OUTPUT_MI_THUNK
1755 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1757 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1758 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1760 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1761 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1763 #undef TARGET_REGISTER_MOVE_COST
1764 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1765 #undef TARGET_MEMORY_MOVE_COST
1766 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1767 #undef TARGET_CANNOT_COPY_INSN_P
1768 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1769 #undef TARGET_RTX_COSTS
1770 #define TARGET_RTX_COSTS rs6000_rtx_costs
1771 #undef TARGET_ADDRESS_COST
1772 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1774 #undef TARGET_DWARF_REGISTER_SPAN
1775 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1777 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1778 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1780 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1781 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1783 #undef TARGET_PROMOTE_FUNCTION_MODE
1784 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1786 #undef TARGET_RETURN_IN_MEMORY
1787 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1789 #undef TARGET_RETURN_IN_MSB
1790 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1792 #undef TARGET_SETUP_INCOMING_VARARGS
1793 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1795 /* Always strict argument naming on rs6000. */
1796 #undef TARGET_STRICT_ARGUMENT_NAMING
1797 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1798 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1799 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1800 #undef TARGET_SPLIT_COMPLEX_ARG
1801 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1802 #undef TARGET_MUST_PASS_IN_STACK
1803 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1804 #undef TARGET_PASS_BY_REFERENCE
1805 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1806 #undef TARGET_ARG_PARTIAL_BYTES
1807 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1808 #undef TARGET_FUNCTION_ARG_ADVANCE
1809 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1810 #undef TARGET_FUNCTION_ARG
1811 #define TARGET_FUNCTION_ARG rs6000_function_arg
1812 #undef TARGET_FUNCTION_ARG_BOUNDARY
1813 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1815 #undef TARGET_BUILD_BUILTIN_VA_LIST
1816 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1818 #undef TARGET_EXPAND_BUILTIN_VA_START
1819 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1821 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1822 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1824 #undef TARGET_EH_RETURN_FILTER_MODE
1825 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1827 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1828 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1830 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1831 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1833 #undef TARGET_FLOATN_MODE
1834 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1836 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1837 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1839 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1840 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1842 #undef TARGET_MD_ASM_ADJUST
1843 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1845 #undef TARGET_OPTION_OVERRIDE
1846 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1848 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1849 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1850 rs6000_builtin_vectorized_function
1852 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1853 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1854 rs6000_builtin_md_vectorized_function
1856 #undef TARGET_STACK_PROTECT_GUARD
1857 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1859 #if !TARGET_MACHO
1860 #undef TARGET_STACK_PROTECT_FAIL
1861 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1862 #endif
1864 #ifdef HAVE_AS_TLS
1865 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1866 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1867 #endif
1869 /* Use a 32-bit anchor range. This leads to sequences like:
1871 addis tmp,anchor,high
1872 add dest,tmp,low
1874 where tmp itself acts as an anchor, and can be shared between
1875 accesses to the same 64k page. */
1876 #undef TARGET_MIN_ANCHOR_OFFSET
1877 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1878 #undef TARGET_MAX_ANCHOR_OFFSET
1879 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1880 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1881 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1882 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1883 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1885 #undef TARGET_BUILTIN_RECIPROCAL
1886 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1888 #undef TARGET_EXPAND_TO_RTL_HOOK
1889 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1891 #undef TARGET_INSTANTIATE_DECLS
1892 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1894 #undef TARGET_SECONDARY_RELOAD
1895 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1897 #undef TARGET_LEGITIMATE_ADDRESS_P
1898 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1900 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1901 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1903 #undef TARGET_LRA_P
1904 #define TARGET_LRA_P rs6000_lra_p
1906 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1907 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1909 #undef TARGET_CAN_ELIMINATE
1910 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1912 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1913 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1915 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1916 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1918 #undef TARGET_TRAMPOLINE_INIT
1919 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1921 #undef TARGET_FUNCTION_VALUE
1922 #define TARGET_FUNCTION_VALUE rs6000_function_value
1924 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1925 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1927 #undef TARGET_OPTION_SAVE
1928 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1930 #undef TARGET_OPTION_RESTORE
1931 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1933 #undef TARGET_OPTION_PRINT
1934 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1936 #undef TARGET_CAN_INLINE_P
1937 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1939 #undef TARGET_SET_CURRENT_FUNCTION
1940 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1942 #undef TARGET_LEGITIMATE_CONSTANT_P
1943 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1945 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1946 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1948 #undef TARGET_CAN_USE_DOLOOP_P
1949 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1951 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1952 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1954 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1955 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1956 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1957 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1958 #undef TARGET_UNWIND_WORD_MODE
1959 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1961 #undef TARGET_OFFLOAD_OPTIONS
1962 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1964 #undef TARGET_C_MODE_FOR_SUFFIX
1965 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1967 #undef TARGET_INVALID_BINARY_OP
1968 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1970 #undef TARGET_OPTAB_SUPPORTED_P
1971 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1973 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1974 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1977 /* Processor table. */
1978 struct rs6000_ptt
1980 const char *const name; /* Canonical processor name. */
1981 const enum processor_type processor; /* Processor type enum value. */
1982 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1985 static struct rs6000_ptt const processor_target_table[] =
1987 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1988 #include "powerpcspe-cpus.def"
1989 #undef RS6000_CPU
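/* The table is built with an X-macro: each RS6000_CPU (NAME, CPU, FLAGS)
   line in powerpcspe-cpus.def expands to one initializer here.
   Illustratively (the exact flag set comes from the .def entry):

     { "8540", PROCESSOR_PPC8540, ... flags from the .def entry ... },  */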
1992 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1993 name is invalid. */
1995 static int
1996 rs6000_cpu_name_lookup (const char *name)
1998 size_t i;
2000 if (name != NULL)
2002 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2003 if (! strcmp (name, processor_target_table[i].name))
2004 return (int)i;
2007 return -1;
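/* Example (editorial): assuming powerpcspe-cpus.def lists an "8540" entry,
   rs6000_cpu_name_lookup ("8540") returns that entry's table index, while
   rs6000_cpu_name_lookup ("no-such-cpu") and rs6000_cpu_name_lookup (NULL)
   both return -1.  */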
2011 /* Return number of consecutive hard regs needed starting at reg REGNO
2012 to hold something of mode MODE.
2013 This is ordinarily the length in words of a value of mode MODE
2014 but can be less for certain modes in special long registers.
2016 For the SPE, GPRs are 64 bits but only 32 bits are visible in
2017 scalar instructions. The upper 32 bits are only available to the
2018 SIMD instructions.
2020 POWER and PowerPC GPRs hold 32 bits worth;
2021    PowerPC64 GPRs and FPRs hold 64 bits worth.  */
2023 static int
2024 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2026 unsigned HOST_WIDE_INT reg_size;
2028 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2029 128-bit floating point that can go in vector registers, which has VSX
2030 memory addressing. */
2031 if (FP_REGNO_P (regno))
2032 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2033 ? UNITS_PER_VSX_WORD
2034 : UNITS_PER_FP_WORD);
2036 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2037 reg_size = UNITS_PER_SPE_WORD;
2039 else if (ALTIVEC_REGNO_P (regno))
2040 reg_size = UNITS_PER_ALTIVEC_WORD;
2042 /* The value returned for SCmode in the E500 double case is 2 for
2043 ABI compatibility; storing an SCmode value in a single register
2044 would require function_arg and rs6000_spe_function_arg to handle
2045 SCmode so as to pass the value correctly in a pair of
2046 registers. */
2047 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
2048 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
2049 reg_size = UNITS_PER_FP_WORD;
2051 else
2052 reg_size = UNITS_PER_WORD;
2054 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
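/* Worked example (editorial): DFmode is 8 bytes, so in 32-bit GPRs
   (reg_size 4) it needs (8 + 4 - 1) / 4 = 2 registers, while V4SImode
   (16 bytes) fits in a single AltiVec register (reg_size 16), since
   (16 + 16 - 1) / 16 = 1.  */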
2057 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2058 MODE. */
2059 static int
2060 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
2062 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2064 if (COMPLEX_MODE_P (mode))
2065 mode = GET_MODE_INNER (mode);
2067   /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2068      register pairs, and PTImode is used where we need to deal with quad
2069      word memory operations.  Don't allow quad words in the argument or frame
2070      pointer registers, just registers 0..31.  */
2071 if (mode == PTImode)
2072 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2074 && ((regno & 1) == 0));
2076 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2077 implementations. Don't allow an item to be split between a FP register
2078 and an Altivec register. Allow TImode in all VSX registers if the user
2079 asked for it. */
2080 if (TARGET_VSX && VSX_REGNO_P (regno)
2081 && (VECTOR_MEM_VSX_P (mode)
2082 || FLOAT128_VECTOR_P (mode)
2083 || reg_addr[mode].scalar_in_vmx_p
2084 || (TARGET_VSX_TIMODE && mode == TImode)
2085 || (TARGET_VADDUQM && mode == V1TImode)))
2087 if (FP_REGNO_P (regno))
2088 return FP_REGNO_P (last_regno);
2090 if (ALTIVEC_REGNO_P (regno))
2092 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2093 return 0;
2095 return ALTIVEC_REGNO_P (last_regno);
2099 /* The GPRs can hold any mode, but values bigger than one register
2100 cannot go past R31. */
2101 if (INT_REGNO_P (regno))
2102 return INT_REGNO_P (last_regno);
2104 /* The float registers (except for VSX vector modes) can only hold floating
2105 modes and DImode. */
2106 if (FP_REGNO_P (regno))
2108 if (FLOAT128_VECTOR_P (mode))
2109 return false;
2111 if (SCALAR_FLOAT_MODE_P (mode)
2112 && (mode != TDmode || (regno % 2) == 0)
2113 && FP_REGNO_P (last_regno))
2114 return 1;
2116 if (GET_MODE_CLASS (mode) == MODE_INT)
2118       if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2119 return 1;
2121 if (TARGET_VSX_SMALL_INTEGER)
2123 if (mode == SImode)
2124 return 1;
2126 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
2127 return 1;
2131 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2132 && PAIRED_VECTOR_MODE (mode))
2133 return 1;
2135 return 0;
2138 /* The CR register can only hold CC modes. */
2139 if (CR_REGNO_P (regno))
2140 return GET_MODE_CLASS (mode) == MODE_CC;
2142 if (CA_REGNO_P (regno))
2143 return mode == Pmode || mode == SImode;
2145   /* AltiVec modes only in AltiVec registers.  */
2146 if (ALTIVEC_REGNO_P (regno))
2147 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2148 || mode == V1TImode);
2150 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2151 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2152 return 1;
2154   /* We cannot put non-VSX TImode or PTImode anywhere except the general
2155      registers, and the value must fit within the register set.  */
2157 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2160 /* Print interesting facts about registers. */
2161 static void
2162 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2164 int r, m;
2166 for (r = first_regno; r <= last_regno; ++r)
2168 const char *comma = "";
2169 int len;
2171 if (first_regno == last_regno)
2172 fprintf (stderr, "%s:\t", reg_name);
2173 else
2174 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2176 len = 8;
2177 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2178 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2180 if (len > 70)
2182 fprintf (stderr, ",\n\t");
2183 len = 8;
2184 comma = "";
2187 if (rs6000_hard_regno_nregs[m][r] > 1)
2188 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2189 rs6000_hard_regno_nregs[m][r]);
2190 else
2191 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2193 comma = ", ";
2196 if (call_used_regs[r])
2198 if (len > 70)
2200 fprintf (stderr, ",\n\t");
2201 len = 8;
2202 comma = "";
2205 len += fprintf (stderr, "%s%s", comma, "call-used");
2206 comma = ", ";
2209 if (fixed_regs[r])
2211 if (len > 70)
2213 fprintf (stderr, ",\n\t");
2214 len = 8;
2215 comma = "";
2218 len += fprintf (stderr, "%s%s", comma, "fixed");
2219 comma = ", ";
2222 if (len > 70)
2224 fprintf (stderr, ",\n\t");
2225 comma = "";
2228 len += fprintf (stderr, "%sreg-class = %s", comma,
2229 reg_class_names[(int)rs6000_regno_regclass[r]]);
2230 comma = ", ";
2232 if (len > 70)
2234 fprintf (stderr, ",\n\t");
2235 comma = "";
2238 fprintf (stderr, "%sregno = %d\n", comma, r);
2242 static const char *
2243 rs6000_debug_vector_unit (enum rs6000_vector v)
2245 const char *ret;
2247 switch (v)
2249 case VECTOR_NONE: ret = "none"; break;
2250 case VECTOR_ALTIVEC: ret = "altivec"; break;
2251 case VECTOR_VSX: ret = "vsx"; break;
2252 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2253 case VECTOR_PAIRED: ret = "paired"; break;
2254 case VECTOR_SPE: ret = "spe"; break;
2255 case VECTOR_OTHER: ret = "other"; break;
2256 default: ret = "unknown"; break;
2259 return ret;
2262 /* Inner function printing just the address mask for a particular reload
2263 register class. */
2264 DEBUG_FUNCTION char *
2265 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2267 static char ret[8];
2268 char *p = ret;
2270 if ((mask & RELOAD_REG_VALID) != 0)
2271 *p++ = 'v';
2272 else if (keep_spaces)
2273 *p++ = ' ';
2275 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2276 *p++ = 'm';
2277 else if (keep_spaces)
2278 *p++ = ' ';
2280 if ((mask & RELOAD_REG_INDEXED) != 0)
2281 *p++ = 'i';
2282 else if (keep_spaces)
2283 *p++ = ' ';
2285 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2286 *p++ = 'O';
2287 else if ((mask & RELOAD_REG_OFFSET) != 0)
2288 *p++ = 'o';
2289 else if (keep_spaces)
2290 *p++ = ' ';
2292 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2293 *p++ = '+';
2294 else if (keep_spaces)
2295 *p++ = ' ';
2297 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2298 *p++ = '+';
2299 else if (keep_spaces)
2300 *p++ = ' ';
2302 if ((mask & RELOAD_REG_AND_M16) != 0)
2303 *p++ = '&';
2304 else if (keep_spaces)
2305 *p++ = ' ';
2307 *p = '\0';
2309 return ret;
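/* Example output (editorial): with keep_spaces true, a mask of
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET comes back as
   "v io   " -- one column per flag, with a blank where a flag is clear.  */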
2312 /* Print the address masks in a human readable fashion.  */
2313 DEBUG_FUNCTION void
2314 rs6000_debug_print_mode (ssize_t m)
2316 ssize_t rc;
2317 int spaces = 0;
2318 bool fuse_extra_p;
2320 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2321 for (rc = 0; rc < N_RELOAD_REG; rc++)
2322 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2323 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2325 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2326 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2327 fprintf (stderr, " Reload=%c%c",
2328 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2329 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2330 else
2331 spaces += sizeof (" Reload=sl") - 1;
2333 if (reg_addr[m].scalar_in_vmx_p)
2335 fprintf (stderr, "%*s Upper=y", spaces, "");
2336 spaces = 0;
2338 else
2339 spaces += sizeof (" Upper=y") - 1;
2341 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2342 || reg_addr[m].fused_toc);
2343 if (!fuse_extra_p)
2345 for (rc = 0; rc < N_RELOAD_REG; rc++)
2347 if (rc != RELOAD_REG_ANY)
2349 	  if (reg_addr[m].fusion_addi_ld[rc]     != CODE_FOR_nothing
2351 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2352 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2353 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2355 fuse_extra_p = true;
2356 break;
2362 if (fuse_extra_p)
2364 fprintf (stderr, "%*s Fuse:", spaces, "");
2365 spaces = 0;
2367 for (rc = 0; rc < N_RELOAD_REG; rc++)
2369 if (rc != RELOAD_REG_ANY)
2371 char load, store;
2373 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2374 load = 'l';
2375 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2376 load = 'L';
2377 else
2378 load = '-';
2380 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2381 store = 's';
2382 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2383 store = 'S';
2384 else
2385 store = '-';
2387 if (load == '-' && store == '-')
2388 spaces += 5;
2389 else
2391 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2392 reload_reg_map[rc].name[0], load, store);
2393 spaces = 0;
2398 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2400 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2401 spaces = 0;
2403 else
2404 spaces += sizeof (" P8gpr") - 1;
2406 if (reg_addr[m].fused_toc)
2408 fprintf (stderr, "%*sToc", (spaces + 1), "");
2409 spaces = 0;
2411 else
2412 spaces += sizeof (" Toc") - 1;
2414 else
2415 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2417 if (rs6000_vector_unit[m] != VECTOR_NONE
2418 || rs6000_vector_mem[m] != VECTOR_NONE)
2420 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2421 spaces, "",
2422 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2423 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2426 fputs ("\n", stderr);
2429 #define DEBUG_FMT_ID "%-32s= "
2430 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2431 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2432 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
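/* Example (editorial): fprintf (stderr, DEBUG_FMT_D, "tls_size", 13) prints
   the name left-justified in a 32-column field followed by "= 13", which is
   what keeps the -mdebug=reg dump aligned.  */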
2434 /* Print various interesting information with -mdebug=reg. */
2435 static void
2436 rs6000_debug_reg_global (void)
2438 static const char *const tf[2] = { "false", "true" };
2439 const char *nl = (const char *)0;
2440 int m;
2441 size_t m1, m2, v;
2442 char costly_num[20];
2443 char nop_num[20];
2444 char flags_buffer[40];
2445 const char *costly_str;
2446 const char *nop_str;
2447 const char *trace_str;
2448 const char *abi_str;
2449 const char *cmodel_str;
2450 struct cl_target_option cl_opts;
2452 /* Modes we want tieable information on. */
2453 static const machine_mode print_tieable_modes[] = {
2454 QImode,
2455 HImode,
2456 SImode,
2457 DImode,
2458 TImode,
2459 PTImode,
2460 SFmode,
2461 DFmode,
2462 TFmode,
2463 IFmode,
2464 KFmode,
2465 SDmode,
2466 DDmode,
2467 TDmode,
2468 V8QImode,
2469 V4HImode,
2470 V2SImode,
2471 V16QImode,
2472 V8HImode,
2473 V4SImode,
2474 V2DImode,
2475 V1TImode,
2476 V32QImode,
2477 V16HImode,
2478 V8SImode,
2479 V4DImode,
2480 V2TImode,
2481 V2SFmode,
2482 V4SFmode,
2483 V2DFmode,
2484 V8SFmode,
2485 V4DFmode,
2486 CCmode,
2487 CCUNSmode,
2488 CCEQmode,
2491 /* Virtual regs we are interested in. */
2492   static const struct {
2493 int regno; /* register number. */
2494 const char *name; /* register name. */
2495 } virtual_regs[] = {
2496 { STACK_POINTER_REGNUM, "stack pointer:" },
2497 { TOC_REGNUM, "toc: " },
2498 { STATIC_CHAIN_REGNUM, "static chain: " },
2499 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2500 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2501 { ARG_POINTER_REGNUM, "arg pointer: " },
2502 { FRAME_POINTER_REGNUM, "frame pointer:" },
2503 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2504 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2505 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2506 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2507 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2508 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2509 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2510     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2511 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2514 fputs ("\nHard register information:\n", stderr);
2515 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2516 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2517 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2518 LAST_ALTIVEC_REGNO,
2519 "vs");
2520 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2521 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2522 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2523 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2524 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2525 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2526 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2527 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2529 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2530 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2531 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2533 fprintf (stderr,
2534 "\n"
2535 "d reg_class = %s\n"
2536 "f reg_class = %s\n"
2537 "v reg_class = %s\n"
2538 "wa reg_class = %s\n"
2539 "wb reg_class = %s\n"
2540 "wd reg_class = %s\n"
2541 "we reg_class = %s\n"
2542 "wf reg_class = %s\n"
2543 "wg reg_class = %s\n"
2544 "wh reg_class = %s\n"
2545 "wi reg_class = %s\n"
2546 "wj reg_class = %s\n"
2547 "wk reg_class = %s\n"
2548 "wl reg_class = %s\n"
2549 "wm reg_class = %s\n"
2550 "wo reg_class = %s\n"
2551 "wp reg_class = %s\n"
2552 "wq reg_class = %s\n"
2553 "wr reg_class = %s\n"
2554 "ws reg_class = %s\n"
2555 "wt reg_class = %s\n"
2556 "wu reg_class = %s\n"
2557 "wv reg_class = %s\n"
2558 "ww reg_class = %s\n"
2559 "wx reg_class = %s\n"
2560 "wy reg_class = %s\n"
2561 "wz reg_class = %s\n"
2562 "wA reg_class = %s\n"
2563 "wH reg_class = %s\n"
2564 "wI reg_class = %s\n"
2565 "wJ reg_class = %s\n"
2566 "wK reg_class = %s\n"
2567 "\n",
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2577 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2578 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2579 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2580 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2581 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2582 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2583 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2584 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2585 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2586 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2587 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2588 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2589 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2590 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2591 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2592 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2593 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2594 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2595 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2596 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2597 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2598 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2599 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2601 nl = "\n";
2602 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2603 rs6000_debug_print_mode (m);
2605 fputs ("\n", stderr);
2607 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2609 machine_mode mode1 = print_tieable_modes[m1];
2610 bool first_time = true;
2612 nl = (const char *)0;
2613 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2615 machine_mode mode2 = print_tieable_modes[m2];
2616 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2618 if (first_time)
2620 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2621 nl = "\n";
2622 first_time = false;
2625 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2629 if (!first_time)
2630 fputs ("\n", stderr);
2633 if (nl)
2634 fputs (nl, stderr);
2636 if (rs6000_recip_control)
2638 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2640 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2641 if (rs6000_recip_bits[m])
2643 fprintf (stderr,
2644 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2645 GET_MODE_NAME (m),
2646 (RS6000_RECIP_AUTO_RE_P (m)
2647 ? "auto"
2648 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2649 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2650 ? "auto"
2651 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2654 fputs ("\n", stderr);
2657 if (rs6000_cpu_index >= 0)
2659 const char *name = processor_target_table[rs6000_cpu_index].name;
2660 HOST_WIDE_INT flags
2661 = processor_target_table[rs6000_cpu_index].target_enable;
2663 sprintf (flags_buffer, "-mcpu=%s flags", name);
2664 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2666 else
2667 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2669 if (rs6000_tune_index >= 0)
2671 const char *name = processor_target_table[rs6000_tune_index].name;
2672 HOST_WIDE_INT flags
2673 = processor_target_table[rs6000_tune_index].target_enable;
2675 sprintf (flags_buffer, "-mtune=%s flags", name);
2676 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2678 else
2679 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2681 cl_target_option_save (&cl_opts, &global_options);
2682 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2683 rs6000_isa_flags);
2685 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2686 rs6000_isa_flags_explicit);
2688 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2689 rs6000_builtin_mask);
2691 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2693 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2694 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2696 switch (rs6000_sched_costly_dep)
2698 case max_dep_latency:
2699 costly_str = "max_dep_latency";
2700 break;
2702 case no_dep_costly:
2703 costly_str = "no_dep_costly";
2704 break;
2706 case all_deps_costly:
2707 costly_str = "all_deps_costly";
2708 break;
2710 case true_store_to_load_dep_costly:
2711 costly_str = "true_store_to_load_dep_costly";
2712 break;
2714 case store_to_load_dep_costly:
2715 costly_str = "store_to_load_dep_costly";
2716 break;
2718 default:
2719 costly_str = costly_num;
2720 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2721 break;
2724 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2726 switch (rs6000_sched_insert_nops)
2728 case sched_finish_regroup_exact:
2729 nop_str = "sched_finish_regroup_exact";
2730 break;
2732 case sched_finish_pad_groups:
2733 nop_str = "sched_finish_pad_groups";
2734 break;
2736 case sched_finish_none:
2737 nop_str = "sched_finish_none";
2738 break;
2740 default:
2741 nop_str = nop_num;
2742 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2743 break;
2746 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2748 switch (rs6000_sdata)
2750 default:
2751 case SDATA_NONE:
2752 break;
2754 case SDATA_DATA:
2755 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2756 break;
2758 case SDATA_SYSV:
2759 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2760 break;
2762 case SDATA_EABI:
2763 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2764 break;
2768 switch (rs6000_traceback)
2770 case traceback_default: trace_str = "default"; break;
2771 case traceback_none: trace_str = "none"; break;
2772 case traceback_part: trace_str = "part"; break;
2773 case traceback_full: trace_str = "full"; break;
2774 default: trace_str = "unknown"; break;
2777 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2779 switch (rs6000_current_cmodel)
2781 case CMODEL_SMALL: cmodel_str = "small"; break;
2782 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2783 case CMODEL_LARGE: cmodel_str = "large"; break;
2784 default: cmodel_str = "unknown"; break;
2787 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2789 switch (rs6000_current_abi)
2791 case ABI_NONE: abi_str = "none"; break;
2792 case ABI_AIX: abi_str = "aix"; break;
2793 case ABI_ELFv2: abi_str = "ELFv2"; break;
2794 case ABI_V4: abi_str = "V4"; break;
2795 case ABI_DARWIN: abi_str = "darwin"; break;
2796 default: abi_str = "unknown"; break;
2799 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2801 if (rs6000_altivec_abi)
2802 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2804 if (rs6000_spe_abi)
2805 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2807 if (rs6000_darwin64_abi)
2808 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2810 if (rs6000_float_gprs)
2811 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2813 fprintf (stderr, DEBUG_FMT_S, "fprs",
2814 (TARGET_FPRS ? "true" : "false"));
2816 fprintf (stderr, DEBUG_FMT_S, "single_float",
2817 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2819 fprintf (stderr, DEBUG_FMT_S, "double_float",
2820 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2822 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2823 (TARGET_SOFT_FLOAT ? "true" : "false"));
2825 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2826 (TARGET_E500_SINGLE ? "true" : "false"));
2828 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2829 (TARGET_E500_DOUBLE ? "true" : "false"));
2831 if (TARGET_LINK_STACK)
2832 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2834 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2836 if (TARGET_P8_FUSION)
2838 char options[80];
2840 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2841 if (TARGET_TOC_FUSION)
2842 strcat (options, ", toc");
2844 if (TARGET_P8_FUSION_SIGN)
2845 strcat (options, ", sign");
2847 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2850 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2851 TARGET_SECURE_PLT ? "secure" : "bss");
2852 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2853 aix_struct_return ? "aix" : "sysv");
2854 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2855 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2856 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2857 tf[!!rs6000_align_branch_targets]);
2858 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2859 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2860 rs6000_long_double_type_size);
2861 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2862 (int)rs6000_sched_restricted_insns_priority);
2863 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2864 (int)END_BUILTINS);
2865 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2866 (int)RS6000_BUILTIN_COUNT);
2868 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2869 (int)TARGET_FLOAT128_ENABLE_TYPE);
2871 if (TARGET_VSX)
2872 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2873 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2875 if (TARGET_DIRECT_MOVE_128)
2876 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2877 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2881 /* Update the addr mask bits in reg_addr to help secondary reload and the
2882    legitimate address support figure out the appropriate addressing to
2883    use.  */
2885 static void
2886 rs6000_setup_reg_addr_masks (void)
2888 ssize_t rc, reg, m, nregs;
2889 addr_mask_type any_addr_mask, addr_mask;
2891 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2893 machine_mode m2 = (machine_mode) m;
2894 bool complex_p = false;
2895 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2896 size_t msize;
2898 if (COMPLEX_MODE_P (m2))
2900 complex_p = true;
2901 m2 = GET_MODE_INNER (m2);
2904 msize = GET_MODE_SIZE (m2);
2906 /* SDmode is special in that we want to access it only via REG+REG
2907 	 addressing on power7 and above, since we want to use the LFIWZX and
2908 	 STFIWX instructions to load and store it.  */
2909 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2911 any_addr_mask = 0;
2912 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2914 addr_mask = 0;
2915 reg = reload_reg_map[rc].reg;
2917 /* Can mode values go in the GPR/FPR/Altivec registers? */
2918 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2920 bool small_int_vsx_p = (small_int_p
2921 && (rc == RELOAD_REG_FPR
2922 || rc == RELOAD_REG_VMX));
2924 nregs = rs6000_hard_regno_nregs[m][reg];
2925 addr_mask |= RELOAD_REG_VALID;
2927 /* Indicate if the mode takes more than 1 physical register. If
2928 it takes a single register, indicate it can do REG+REG
2929 addressing. Small integers in VSX registers can only do
2930 REG+REG addressing. */
2931 if (small_int_vsx_p)
2932 addr_mask |= RELOAD_REG_INDEXED;
2933 else if (nregs > 1 || m == BLKmode || complex_p)
2934 addr_mask |= RELOAD_REG_MULTIPLE;
2935 else
2936 addr_mask |= RELOAD_REG_INDEXED;
2938 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2939 addressing. Restrict addressing on SPE for 64-bit types
2940 because of the SUBREG hackery used to address 64-bit floats in
2941 '32-bit' GPRs. If we allow scalars into Altivec registers,
2942 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2944 if (TARGET_UPDATE
2945 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2946 && msize <= 8
2947 && !VECTOR_MODE_P (m2)
2948 && !FLOAT128_VECTOR_P (m2)
2949 && !complex_p
2950 && !small_int_vsx_p
2951 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2952 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2953 && !(TARGET_E500_DOUBLE && msize == 8))
2955 addr_mask |= RELOAD_REG_PRE_INCDEC;
2957 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2958 we don't allow PRE_MODIFY for some multi-register
2959 operations. */
2960 switch (m)
2962 default:
2963 addr_mask |= RELOAD_REG_PRE_MODIFY;
2964 break;
2966 case DImode:
2967 if (TARGET_POWERPC64)
2968 addr_mask |= RELOAD_REG_PRE_MODIFY;
2969 break;
2971 case DFmode:
2972 case DDmode:
2973 if (TARGET_DF_INSN)
2974 addr_mask |= RELOAD_REG_PRE_MODIFY;
2975 break;
2980 /* GPR and FPR registers can do REG+OFFSET addressing, except
2981 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2982 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2983 if ((addr_mask != 0) && !indexed_only_p
2984 && msize <= 8
2985 && (rc == RELOAD_REG_GPR
2986 || ((msize == 8 || m2 == SFmode)
2987 && (rc == RELOAD_REG_FPR
2988 || (rc == RELOAD_REG_VMX
2989 && TARGET_P9_DFORM_SCALAR)))))
2990 addr_mask |= RELOAD_REG_OFFSET;
2992 	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2993 	     instructions are enabled.  The offset for 128-bit VSX registers is
2994 	     only 12 bits.  While GPRs can handle the full offset range, VSX
2995 	     registers can only handle the restricted range.  */
2996 else if ((addr_mask != 0) && !indexed_only_p
2997 && msize == 16 && TARGET_P9_DFORM_VECTOR
2998 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2999 || (m2 == TImode && TARGET_VSX_TIMODE)))
3001 addr_mask |= RELOAD_REG_OFFSET;
3002 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3003 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3006 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3007 addressing on 128-bit types. */
3008 if (rc == RELOAD_REG_VMX && msize == 16
3009 && (addr_mask & RELOAD_REG_VALID) != 0)
3010 addr_mask |= RELOAD_REG_AND_M16;
3012 reg_addr[m].addr_mask[rc] = addr_mask;
3013 any_addr_mask |= addr_mask;
3016 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
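/* The masks computed here can be inspected with -mdebug=reg, which dumps
   them per mode via rs6000_debug_print_mode above.  */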
3021 /* Initialize the various global tables that are based on register size. */
3022 static void
3023 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3025 ssize_t r, m, c;
3026 int align64;
3027 int align32;
3029 /* Precalculate REGNO_REG_CLASS. */
3030 rs6000_regno_regclass[0] = GENERAL_REGS;
3031 for (r = 1; r < 32; ++r)
3032 rs6000_regno_regclass[r] = BASE_REGS;
3034 for (r = 32; r < 64; ++r)
3035 rs6000_regno_regclass[r] = FLOAT_REGS;
3037 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3038 rs6000_regno_regclass[r] = NO_REGS;
3040 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3041 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3043 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3044 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3045 rs6000_regno_regclass[r] = CR_REGS;
3047 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3048 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3049 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3050 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3051 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3052 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
3053 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
3054 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3055 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3056 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3057 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3058 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3060   /* Precalculate the mapping from register class to the simpler reload register
3061      class.  We don't need all the register classes that are combinations of
3062      different classes, just the simple ones that have constraint letters.  */
3063 for (c = 0; c < N_REG_CLASSES; c++)
3064 reg_class_to_reg_type[c] = NO_REG_TYPE;
3066 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3067 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3068 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3069 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3070 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3071 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3072 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3073 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3074 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3075 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3076 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
3077 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
3079 if (TARGET_VSX)
3081 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3082 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3084 else
3086 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3087 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3090   /* Precalculate the valid memory formats as well as the vector information;
3091      this must be set up before the rs6000_hard_regno_nregs_internal calls
3092      below.  */
3093 gcc_assert ((int)VECTOR_NONE == 0);
3094 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3095 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3097 gcc_assert ((int)CODE_FOR_nothing == 0);
3098 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3100 gcc_assert ((int)NO_REGS == 0);
3101 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3103   /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128
3104      controls whether the compiler assumes native alignment or must still use 128-bit alignment.  */
3105 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3107 align64 = 64;
3108 align32 = 32;
3110 else
3112 align64 = 128;
3113 align32 = 128;
3116 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3117 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3118 if (TARGET_FLOAT128_TYPE)
3120 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3121 rs6000_vector_align[KFmode] = 128;
3123 if (FLOAT128_IEEE_P (TFmode))
3125 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3126 rs6000_vector_align[TFmode] = 128;
3130 /* V2DF mode, VSX only. */
3131 if (TARGET_VSX)
3133 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3134 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3135 rs6000_vector_align[V2DFmode] = align64;
3138 /* V4SF mode, either VSX or Altivec. */
3139 if (TARGET_VSX)
3141 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3142 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3143 rs6000_vector_align[V4SFmode] = align32;
3145 else if (TARGET_ALTIVEC)
3147 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3148 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3149 rs6000_vector_align[V4SFmode] = align32;
3152 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3153 and stores. */
3154 if (TARGET_ALTIVEC)
3156 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3157 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3158 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3159 rs6000_vector_align[V4SImode] = align32;
3160 rs6000_vector_align[V8HImode] = align32;
3161 rs6000_vector_align[V16QImode] = align32;
3163 if (TARGET_VSX)
3165 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3166 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3167 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3169 else
3171 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3172 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3173 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3177 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3178 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3179 if (TARGET_VSX)
3181 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3182 rs6000_vector_unit[V2DImode]
3183 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3184 rs6000_vector_align[V2DImode] = align64;
3186 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3187 rs6000_vector_unit[V1TImode]
3188 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3189 rs6000_vector_align[V1TImode] = 128;
3192 /* DFmode, see if we want to use the VSX unit. Memory is handled
3193 differently, so don't set rs6000_vector_mem. */
3194 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3196 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3197 rs6000_vector_align[DFmode] = 64;
3200 /* SFmode, see if we want to use the VSX unit. */
3201 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3203 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3204 rs6000_vector_align[SFmode] = 32;
3207 /* Allow TImode in VSX register and set the VSX memory macros. */
3208 if (TARGET_VSX && TARGET_VSX_TIMODE)
3210 rs6000_vector_mem[TImode] = VECTOR_VSX;
3211 rs6000_vector_align[TImode] = align64;
3214 /* TODO add SPE and paired floating point vector support. */
3216 /* Register class constraints for the constraints that depend on compile
3217 switches. When the VSX code was added, different constraints were added
3218 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3219 of the VSX registers are used. The register classes for scalar floating
3220      point types are set based on whether we allow that type into the upper
3221 (Altivec) registers. GCC has register classes to target the Altivec
3222 registers for load/store operations, to select using a VSX memory
3223 operation instead of the traditional floating point operation. The
3224 constraints are:
3226 d - Register class to use with traditional DFmode instructions.
3227 f - Register class to use with traditional SFmode instructions.
3228 v - Altivec register.
3229 wa - Any VSX register.
3230 wc - Reserved to represent individual CR bits (used in LLVM).
3231 wd - Preferred register class for V2DFmode.
3232 wf - Preferred register class for V4SFmode.
3233 wg - Float register for power6x move insns.
3234 wh - FP register for direct move instructions.
3235 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3236 wj - FP or VSX register to hold 64-bit integers for direct moves.
3237 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3238 wl - Float register if we can do 32-bit signed int loads.
3239 wm - VSX register for ISA 2.07 direct move operations.
3240 wn - always NO_REGS.
3241 wr - GPR if 64-bit mode is permitted.
3242 ws - Register class to do ISA 2.06 DF operations.
3243 wt - VSX register for TImode in VSX registers.
3244 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3245 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3246 ww - Register class to do SF conversions in with VSX operations.
3247 wx - Float register if we can do 32-bit int stores.
3248 wy - Register class to do ISA 2.07 SF operations.
3249 wz - Float register if we can do 32-bit unsigned int loads.
3250 wH - Altivec register if SImode is allowed in VSX registers.
3251 wI - VSX register if SImode is allowed in VSX registers.
3252 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3253 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
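/* Illustrative use from user code (editorial, not part of this file): the
   "wa" constraint together with the %x output modifier selects any VSX
   register, e.g.:

     __vector double
     vsx_add (__vector double a, __vector double b)
     {
       __vector double r;
       __asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (r) : "wa" (a), "wa" (b));
       return r;
     }

   vsx_add is a made-up name; xvadddp is the VSX vector double-precision
   add instruction.  */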
3255 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3256 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3258 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3259 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3261 if (TARGET_VSX)
3263 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3264 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3265 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3267 if (TARGET_VSX_TIMODE)
3268 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3270 if (TARGET_UPPER_REGS_DF) /* DFmode */
3272 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3273 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3275 else
3276 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3278 if (TARGET_UPPER_REGS_DI) /* DImode */
3279 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3280 else
3281 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3284 /* Add conditional constraints based on various options, to allow us to
3285 collapse multiple insn patterns. */
3286 if (TARGET_ALTIVEC)
3287 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3289 if (TARGET_MFPGPR) /* DFmode */
3290 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3292 if (TARGET_LFIWAX)
3293 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3295 if (TARGET_DIRECT_MOVE)
3297 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3299 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3300 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3301 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3302 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3305 if (TARGET_POWERPC64)
3307 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3308 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3311 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3313 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3314 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3315 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3317 else if (TARGET_P8_VECTOR)
3319 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3320 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3322 else if (TARGET_VSX)
3323 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3325 if (TARGET_STFIWX)
3326 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3328 if (TARGET_LFIWZX)
3329 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3331 if (TARGET_FLOAT128_TYPE)
3333 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3334 if (FLOAT128_IEEE_P (TFmode))
3335 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3338 /* Support for new D-form instructions. */
3339 if (TARGET_P9_DFORM_SCALAR)
3340 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3342 /* Support for ISA 3.0 (power9) vectors. */
3343 if (TARGET_P9_VECTOR)
3344 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3346   /* Support for new direct moves (ISA 3.0 + 64-bit).  */
3347 if (TARGET_DIRECT_MOVE_128)
3348 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3350 /* Support small integers in VSX registers. */
3351 if (TARGET_VSX_SMALL_INTEGER)
3353 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3354 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3355 if (TARGET_P9_VECTOR)
3357 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3358 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3362 /* Set up the reload helper and direct move functions. */
3363 if (TARGET_VSX || TARGET_ALTIVEC)
3365 if (TARGET_64BIT)
3367 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3368 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3369 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3370 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3371 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3372 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3373 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3374 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3375 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3376 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3377 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3378 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3379 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3380 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3381 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3382 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3383 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3384 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3385 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3386 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3388 if (FLOAT128_VECTOR_P (KFmode))
3390 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3391 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3394 if (FLOAT128_VECTOR_P (TFmode))
3396 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3397 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3400 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3401 available. */
3402 if (TARGET_NO_SDMODE_STACK)
3404 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3405 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3408 if (TARGET_VSX_TIMODE)
3410 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3411 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3414 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3416 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3417 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3418 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3419 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3420 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3421 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3422 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3423 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3424 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3426 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3427 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3428 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3429 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3430 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3431 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3432 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3433 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3434 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3436 if (FLOAT128_VECTOR_P (KFmode))
3438 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3439 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3442 if (FLOAT128_VECTOR_P (TFmode))
3444 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3445 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3449 else
3451 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3452 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3453 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3454 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3455 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3456 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3457 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3458 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3459 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3460 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3461 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3462 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3463 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3464 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3465 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3466 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3467 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3468 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3469 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3470 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3472 if (FLOAT128_VECTOR_P (KFmode))
3474 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3475 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3478 if (FLOAT128_IEEE_P (TFmode))
3480 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3481 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3484 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3485 available. */
3486 if (TARGET_NO_SDMODE_STACK)
3488 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3489 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3492 if (TARGET_VSX_TIMODE)
3494 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3495 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3498 if (TARGET_DIRECT_MOVE)
3500 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3501 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3502 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3506 if (TARGET_UPPER_REGS_DF)
3507 reg_addr[DFmode].scalar_in_vmx_p = true;
3509 if (TARGET_UPPER_REGS_DI)
3510 reg_addr[DImode].scalar_in_vmx_p = true;
3512 if (TARGET_UPPER_REGS_SF)
3513 reg_addr[SFmode].scalar_in_vmx_p = true;
3515 if (TARGET_VSX_SMALL_INTEGER)
3517 reg_addr[SImode].scalar_in_vmx_p = true;
3518 if (TARGET_P9_VECTOR)
3520 reg_addr[HImode].scalar_in_vmx_p = true;
3521 reg_addr[QImode].scalar_in_vmx_p = true;
3526 /* Setup the fusion operations. */
3527 if (TARGET_P8_FUSION)
3529 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3530 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3531 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3532 if (TARGET_64BIT)
3533 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
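/* A sketch of what power8 load fusion pairs up (illustrative only; the
   exact mnemonics and offsets are an example, not taken from this file):

	addis r9,r2,var@toc@ha	  build the high 16 bits of the address
	lwz   r10,var@toc@l(r9)	  dependent load fuses with the addis

   The CODE_FOR_fusion_gpr_load_* patterns registered above let the
   compiler emit the two insns back to back so the hardware can fuse
   them.  */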
3536 if (TARGET_P9_FUSION)
3538 struct fuse_insns {
3539 enum machine_mode mode; /* mode of the fused type. */
3540 enum machine_mode pmode; /* pointer mode. */
3541 enum rs6000_reload_reg_type rtype; /* register type. */
3542 enum insn_code load; /* load insn. */
3543 enum insn_code store; /* store insn. */
3546 static const struct fuse_insns addis_insns[] = {
3547 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3548 CODE_FOR_fusion_vsx_di_sf_load,
3549 CODE_FOR_fusion_vsx_di_sf_store },
3551 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3552 CODE_FOR_fusion_vsx_si_sf_load,
3553 CODE_FOR_fusion_vsx_si_sf_store },
3555 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3556 CODE_FOR_fusion_vsx_di_df_load,
3557 CODE_FOR_fusion_vsx_di_df_store },
3559 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3560 CODE_FOR_fusion_vsx_si_df_load,
3561 CODE_FOR_fusion_vsx_si_df_store },
3563 { E_DImode, E_DImode, RELOAD_REG_FPR,
3564 CODE_FOR_fusion_vsx_di_di_load,
3565 CODE_FOR_fusion_vsx_di_di_store },
3567 { E_DImode, E_SImode, RELOAD_REG_FPR,
3568 CODE_FOR_fusion_vsx_si_di_load,
3569 CODE_FOR_fusion_vsx_si_di_store },
3571 { E_QImode, E_DImode, RELOAD_REG_GPR,
3572 CODE_FOR_fusion_gpr_di_qi_load,
3573 CODE_FOR_fusion_gpr_di_qi_store },
3575 { E_QImode, E_SImode, RELOAD_REG_GPR,
3576 CODE_FOR_fusion_gpr_si_qi_load,
3577 CODE_FOR_fusion_gpr_si_qi_store },
3579 { E_HImode, E_DImode, RELOAD_REG_GPR,
3580 CODE_FOR_fusion_gpr_di_hi_load,
3581 CODE_FOR_fusion_gpr_di_hi_store },
3583 { E_HImode, E_SImode, RELOAD_REG_GPR,
3584 CODE_FOR_fusion_gpr_si_hi_load,
3585 CODE_FOR_fusion_gpr_si_hi_store },
3587 { E_SImode, E_DImode, RELOAD_REG_GPR,
3588 CODE_FOR_fusion_gpr_di_si_load,
3589 CODE_FOR_fusion_gpr_di_si_store },
3591 { E_SImode, E_SImode, RELOAD_REG_GPR,
3592 CODE_FOR_fusion_gpr_si_si_load,
3593 CODE_FOR_fusion_gpr_si_si_store },
3595 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3596 CODE_FOR_fusion_gpr_di_sf_load,
3597 CODE_FOR_fusion_gpr_di_sf_store },
3599 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3600 CODE_FOR_fusion_gpr_si_sf_load,
3601 CODE_FOR_fusion_gpr_si_sf_store },
3603 { E_DImode, E_DImode, RELOAD_REG_GPR,
3604 CODE_FOR_fusion_gpr_di_di_load,
3605 CODE_FOR_fusion_gpr_di_di_store },
3607 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3608 CODE_FOR_fusion_gpr_di_df_load,
3609 CODE_FOR_fusion_gpr_di_df_store },
3612 machine_mode cur_pmode = Pmode;
3613 size_t i;
3615 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3617 machine_mode xmode = addis_insns[i].mode;
3618 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3620 if (addis_insns[i].pmode != cur_pmode)
3621 continue;
3623 if (rtype == RELOAD_REG_FPR
3624 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3625 continue;
3627 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3628 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3630 if (rtype == RELOAD_REG_FPR && TARGET_P9_DFORM_SCALAR)
3632 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3633 = addis_insns[i].load;
3634 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3635 = addis_insns[i].store;
3640 /* Note which types we support for fusing a TOC setup plus a memory insn.
3641 We only generate fused TOCs for medium/large code models. */
3642 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3643 && (TARGET_CMODEL != CMODEL_SMALL))
3645 reg_addr[QImode].fused_toc = true;
3646 reg_addr[HImode].fused_toc = true;
3647 reg_addr[SImode].fused_toc = true;
3648 reg_addr[DImode].fused_toc = true;
3649 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3651 if (TARGET_SINGLE_FLOAT)
3652 reg_addr[SFmode].fused_toc = true;
3653 if (TARGET_DOUBLE_FLOAT)
3654 reg_addr[DFmode].fused_toc = true;
3658 /* Precalculate HARD_REGNO_NREGS. */
3659 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3660 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3661 rs6000_hard_regno_nregs[m][r]
3662 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3664 /* Precalculate HARD_REGNO_MODE_OK. */
3665 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3666 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3667 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3668 rs6000_hard_regno_mode_ok_p[m][r] = true;
3670 /* Precalculate CLASS_MAX_NREGS sizes. */
3671 for (c = 0; c < LIM_REG_CLASSES; ++c)
3673 int reg_size;
3675 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3676 reg_size = UNITS_PER_VSX_WORD;
3678 else if (c == ALTIVEC_REGS)
3679 reg_size = UNITS_PER_ALTIVEC_WORD;
3681 else if (c == FLOAT_REGS)
3682 reg_size = UNITS_PER_FP_WORD;
3684 else
3685 reg_size = UNITS_PER_WORD;
3687 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3689 machine_mode m2 = (machine_mode)m;
3690 int reg_size2 = reg_size;
3692 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3693 in VSX. */
3694 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3695 reg_size2 = UNITS_PER_FP_WORD;
3697 rs6000_class_max_nregs[m][c]
3698 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3702 if (TARGET_E500_DOUBLE)
3703 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
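/* A worked example of the rounding division above (illustrative):
   V2DFmode is 16 bytes, so in FLOAT_REGS with UNITS_PER_FP_WORD == 8 it
   needs (16 + 8 - 1) / 8 == 2 registers, while in a VSX register class
   with UNITS_PER_VSX_WORD == 16 it needs (16 + 16 - 1) / 16 == 1.  */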
3705 /* Calculate which modes to automatically generate code to use the
3706 reciprocal divide and square root instructions. In the future, possibly
3707 automatically generate the instructions even if the user did not specify
3708 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3709 not accurate enough. */
3710 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3711 if (TARGET_FRES)
3712 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3713 if (TARGET_FRE)
3714 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3715 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3716 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3717 if (VECTOR_UNIT_VSX_P (V2DFmode))
3718 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3720 if (TARGET_FRSQRTES)
3721 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3722 if (TARGET_FRSQRTE)
3723 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3724 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3725 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3726 if (VECTOR_UNIT_VSX_P (V2DFmode))
3727 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3729 if (rs6000_recip_control)
3731 if (!flag_finite_math_only)
3732 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3733 if (flag_trapping_math)
3734 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3735 if (!flag_reciprocal_math)
3736 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3737 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3739 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3740 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3741 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3743 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3744 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3745 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3747 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3748 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3749 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3751 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3752 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3753 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3755 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3756 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3757 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3759 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3760 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3761 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3763 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3764 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3765 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3767 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3768 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3769 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
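/* For reference (an illustrative C sketch; builtin_fres stands in for
   the hardware estimate instruction and is hypothetical): a reciprocal
   estimate is only accurate to a few bits, so generated code refines it
   with Newton-Raphson steps, e = e * (2 - b * e), before using it in
   place of a divide:

	float recip (float b)
	{
	  float e = builtin_fres (b);
	  e = e * (2.0f - b * e);
	  e = e * (2.0f - b * e);
	  return e;
	}

   The AUTO_* bits above record the modes for which the compiler will
   generate such sequences when the -mrecip math options allow it.  */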
3773 /* Update the addr mask bits in reg_addr to help secondary reload and the
3774 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing
3775 to use. */
3776 rs6000_setup_reg_addr_masks ();
3778 if (global_init_p || TARGET_DEBUG_TARGET)
3780 if (TARGET_DEBUG_REG)
3781 rs6000_debug_reg_global ();
3783 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3784 fprintf (stderr,
3785 "SImode variable mult cost = %d\n"
3786 "SImode constant mult cost = %d\n"
3787 "SImode short constant mult cost = %d\n"
3788 "DImode multipliciation cost = %d\n"
3789 "SImode division cost = %d\n"
3790 "DImode division cost = %d\n"
3791 "Simple fp operation cost = %d\n"
3792 "DFmode multiplication cost = %d\n"
3793 "SFmode division cost = %d\n"
3794 "DFmode division cost = %d\n"
3795 "cache line size = %d\n"
3796 "l1 cache size = %d\n"
3797 "l2 cache size = %d\n"
3798 "simultaneous prefetches = %d\n"
3799 "\n",
3800 rs6000_cost->mulsi,
3801 rs6000_cost->mulsi_const,
3802 rs6000_cost->mulsi_const9,
3803 rs6000_cost->muldi,
3804 rs6000_cost->divsi,
3805 rs6000_cost->divdi,
3806 rs6000_cost->fp,
3807 rs6000_cost->dmul,
3808 rs6000_cost->sdiv,
3809 rs6000_cost->ddiv,
3810 rs6000_cost->cache_line_size,
3811 rs6000_cost->l1_cache_size,
3812 rs6000_cost->l2_cache_size,
3813 rs6000_cost->simultaneous_prefetches);
3817 #if TARGET_MACHO
3818 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3820 static void
3821 darwin_rs6000_override_options (void)
3823 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3824 off. */
3825 rs6000_altivec_abi = 1;
3826 TARGET_ALTIVEC_VRSAVE = 1;
3827 rs6000_current_abi = ABI_DARWIN;
3829 if (DEFAULT_ABI == ABI_DARWIN
3830 && TARGET_64BIT)
3831 darwin_one_byte_bool = 1;
3833 if (TARGET_64BIT && ! TARGET_POWERPC64)
3835 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3836 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3838 if (flag_mkernel)
3840 rs6000_default_long_calls = 1;
3841 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3844 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3845 Altivec. */
3846 if (!flag_mkernel && !flag_apple_kext
3847 && TARGET_64BIT
3848 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3849 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3851 /* Unless the user (not the configurer) has explicitly overridden
3852 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3853 G4 unless targeting the kernel. */
3854 if (!flag_mkernel
3855 && !flag_apple_kext
3856 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3857 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3858 && ! global_options_set.x_rs6000_cpu_index)
3860 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3863 #endif
3865 /* If not otherwise specified by a target, make 'long double' equivalent to
3866 'double'. */
3868 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3869 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3870 #endif
3872 /* Return the builtin mask of the various options used that could affect
3873 which builtins are available. In the past we used target_flags, but we've
3874 run out of bits, and some options like SPE and PAIRED are no longer in
3875 target_flags. */
3877 HOST_WIDE_INT
3878 rs6000_builtin_mask_calculate (void)
3880 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3881 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3882 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3883 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3884 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3885 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3886 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3887 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3888 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3889 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3890 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3891 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3892 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3893 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3894 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3895 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3896 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3897 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3898 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3899 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3900 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3901 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
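/* Illustrative use (a sketch, not code from this file): callers compare
   the mask against the bits a builtin requires, e.g.

	HOST_WIDE_INT bmask = rs6000_builtin_mask_calculate ();
	if ((bmask & RS6000_BTM_ALTIVEC) == 0)
	  error ("builtin requires the -maltivec option");
*/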
3904 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3905 to clobber the XER[CA] bit because clobbering that bit without telling
3906 the compiler worked just fine with versions of GCC before GCC 5, and
3907 breaking a lot of older code in ways that are hard to track down is
3908 not such a great idea. */
3910 static rtx_insn *
3911 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3912 vec<const char *> &/*constraints*/,
3913 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3915 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3916 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3917 return NULL;
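/* An example of the kind of pre-GCC 5 code this protects (illustrative):
   the classic "normalize x to 0/1" idiom modifies XER[CA] without
   declaring any clobber:

	int t;
	asm ("addic %0,%1,-1\n\tsubfe %0,%0,%1" : "=r" (t) : "r" (x));

   With the CA clobber pushed above, such asm statements stay correct
   even though they never mention the carry bit.  */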
3920 /* Override command line options.
3922 Combine build-specific configuration information with options
3923 specified on the command line to set various state variables which
3924 influence code generation, optimization, and expansion of built-in
3925 functions. Assure that command-line configuration preferences are
3926 compatible with each other and with the build configuration; issue
3927 warnings while adjusting configuration or error messages while
3928 rejecting configuration.
3930 Upon entry to this function:
3932 This function is called once at the beginning of
3933 compilation, and then again at the start and end of compiling
3934 each section of code that has a different configuration, as
3935 indicated, for example, by adding the
3937 __attribute__((__target__("cpu=power9")))
3939 qualifier to a function definition or, for example, by bracketing
3940 code between
3942 #pragma GCC target("altivec")
3946 #pragma GCC reset_options
3948 directives. Parameter global_init_p is true for the initial
3949 invocation, which initializes global variables, and false for all
3950 subsequent invocations.
3953 Various global state information is assumed to be valid. This
3954 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3955 default CPU specified at build configure time, TARGET_DEFAULT,
3956 representing the default set of option flags for the default
3957 target, and global_options_set.x_rs6000_isa_flags, representing
3958 which options were requested on the command line.
3960 Upon return from this function:
3962 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3963 was set by name on the command line. Additionally, if certain
3964 attributes are automatically enabled or disabled by this function
3965 in order to assure compatibility between options and
3966 configuration, the flags associated with those attributes are
3967 also set. By setting these "explicit bits", we avoid the risk
3968 that other code might accidentally overwrite these particular
3969 attributes with "default values".
3971 The various bits of rs6000_isa_flags are set to indicate the
3972 target options that have been selected for the most current
3973 compilation efforts. This has the effect of also turning on the
3974 associated TARGET_XXX values since these are macros which are
3975 generally defined to test the corresponding bit of the
3976 rs6000_isa_flags variable.
3978 The variable rs6000_builtin_mask is set to represent the target
3979 options for the most current compilation efforts, consistent with
3980 the current contents of rs6000_isa_flags. This variable controls
3981 expansion of built-in functions.
3983 Various other global variables and fields of global structures
3984 (over 50 in all) are initialized to reflect the desired options
3985 for the most current compilation efforts. */
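/* For example (illustrative), a single translation unit can mix
   configurations:

	#pragma GCC push_options
	#pragma GCC target ("altivec")
	void vec_fn (void) { ... }	compiled with AltiVec enabled
	#pragma GCC pop_options
	void plain_fn (void) { ... }	compiled with the command-line flags

   Each configuration boundary above causes this function to run again,
   with global_init_p false.  */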
3987 static bool
3988 rs6000_option_override_internal (bool global_init_p)
3990 bool ret = true;
3991 bool have_cpu = false;
3993 /* The default cpu requested at configure time, if any. */
3994 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3996 HOST_WIDE_INT set_masks;
3997 HOST_WIDE_INT ignore_masks;
3998 int cpu_index;
3999 int tune_index;
4000 struct cl_target_option *main_target_opt
4001 = ((global_init_p || target_option_default_node == NULL)
4002 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4004 /* Print defaults. */
4005 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4006 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4008 /* Remember the explicit arguments. */
4009 if (global_init_p)
4010 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4012 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4013 library functions, so warn about it. The flag may be useful for
4014 performance studies from time to time though, so don't disable it
4015 entirely. */
4016 if (global_options_set.x_rs6000_alignment_flags
4017 && rs6000_alignment_flags == MASK_ALIGN_POWER
4018 && DEFAULT_ABI == ABI_DARWIN
4019 && TARGET_64BIT)
4020 warning (0, "-malign-power is not supported for 64-bit Darwin;"
4021 " it is incompatible with the installed C and C++ libraries");
4023 /* Numerous experiments show that IRA based loop pressure
4024 calculation works better for RTL loop invariant motion on targets
4025 with enough (>= 32) registers. It is an expensive optimization,
4026 so it is enabled only when optimizing for peak performance. */
4027 if (optimize >= 3 && global_init_p
4028 && !global_options_set.x_flag_ira_loop_pressure)
4029 flag_ira_loop_pressure = 1;
4031 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4032 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
4033 setting was already specified explicitly. */
4034 if (flag_sanitize & SANITIZE_USER_ADDRESS
4035 && !global_options_set.x_flag_asynchronous_unwind_tables)
4036 flag_asynchronous_unwind_tables = 1;
4038 /* Set the pointer size. */
4039 if (TARGET_64BIT)
4041 rs6000_pmode = (int)DImode;
4042 rs6000_pointer_size = 64;
4044 else
4046 rs6000_pmode = (int)SImode;
4047 rs6000_pointer_size = 32;
4050 /* Some OSs don't support saving the high part of 64-bit registers on context
4051 switch. Other OSs don't support saving Altivec registers. On those OSs,
4052 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4053 if the user wants either, the user must explicitly specify them and we
4054 won't interfere with the user's specification. */
4056 set_masks = POWERPC_MASKS;
4057 #ifdef OS_MISSING_POWERPC64
4058 if (OS_MISSING_POWERPC64)
4059 set_masks &= ~OPTION_MASK_POWERPC64;
4060 #endif
4061 #ifdef OS_MISSING_ALTIVEC
4062 if (OS_MISSING_ALTIVEC)
4063 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4064 | OTHER_VSX_VECTOR_MASKS);
4065 #endif
4067 /* Don't let the processor default override flags that were given explicitly. */
4068 set_masks &= ~rs6000_isa_flags_explicit;
4070 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4071 the cpu in a target attribute or pragma, but did not specify a tuning
4072 option, use the cpu for the tuning option rather than the option specified
4073 with -mtune on the command line. Process a '--with-cpu' configuration
4074 request as an implicit --cpu. */
4075 if (rs6000_cpu_index >= 0)
4077 cpu_index = rs6000_cpu_index;
4078 have_cpu = true;
4080 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4082 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4083 have_cpu = true;
4085 else if (implicit_cpu)
4087 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4088 have_cpu = true;
4090 else
4092 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4093 const char *default_cpu = ((!TARGET_POWERPC64)
4094 ? "powerpc"
4095 : ((BYTES_BIG_ENDIAN)
4096 ? "powerpc64"
4097 : "powerpc64le"));
4099 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4100 have_cpu = false;
4103 gcc_assert (cpu_index >= 0);
4105 if (have_cpu)
4107 #ifndef HAVE_AS_POWER9
4108 if (processor_target_table[rs6000_cpu_index].processor
4109 == PROCESSOR_POWER9)
4111 have_cpu = false;
4112 warning (0, "will not generate power9 instructions because "
4113 "assembler lacks power9 support");
4115 #endif
4116 #ifndef HAVE_AS_POWER8
4117 if (processor_target_table[rs6000_cpu_index].processor
4118 == PROCESSOR_POWER8)
4120 have_cpu = false;
4121 warning (0, "will not generate power8 instructions because "
4122 "assembler lacks power8 support");
4124 #endif
4125 #ifndef HAVE_AS_POPCNTD
4126 if (processor_target_table[rs6000_cpu_index].processor
4127 == PROCESSOR_POWER7)
4129 have_cpu = false;
4130 warning (0, "will not generate power7 instructions because "
4131 "assembler lacks power7 support");
4133 #endif
4134 #ifndef HAVE_AS_DFP
4135 if (processor_target_table[rs6000_cpu_index].processor
4136 == PROCESSOR_POWER6)
4138 have_cpu = false;
4139 warning (0, "will not generate power6 instructions because "
4140 "assembler lacks power6 support");
4142 #endif
4143 #ifndef HAVE_AS_POPCNTB
4144 if (processor_target_table[rs6000_cpu_index].processor
4145 == PROCESSOR_POWER5)
4147 have_cpu = false;
4148 warning (0, "will not generate power5 instructions because "
4149 "assembler lacks power5 support");
4151 #endif
4153 if (!have_cpu)
4155 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4156 const char *default_cpu = (!TARGET_POWERPC64
4157 ? "powerpc"
4158 : (BYTES_BIG_ENDIAN
4159 ? "powerpc64"
4160 : "powerpc64le"));
4162 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4166 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4167 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4168 with those from the cpu, except for options that were explicitly set. If
4169 we don't have a cpu, do not override the target bits set in
4170 TARGET_DEFAULT. */
4171 if (have_cpu)
4173 rs6000_isa_flags &= ~set_masks;
4174 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4175 & set_masks);
4177 else
4179 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4180 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4181 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4182 to using rs6000_isa_flags, we need to do the initialization here.
4184 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4185 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4186 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4187 : processor_target_table[cpu_index].target_enable);
4188 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4191 if (rs6000_tune_index >= 0)
4192 tune_index = rs6000_tune_index;
4193 else if (have_cpu)
4194 rs6000_tune_index = tune_index = cpu_index;
4195 else
4197 size_t i;
4198 enum processor_type tune_proc
4199 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4201 tune_index = -1;
4202 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4203 if (processor_target_table[i].processor == tune_proc)
4205 rs6000_tune_index = tune_index = i;
4206 break;
4210 gcc_assert (tune_index >= 0);
4211 rs6000_cpu = processor_target_table[tune_index].processor;
4213 /* Pick defaults for SPE related control flags. Do this early to make sure
4214 that the TARGET_ macros are representative ASAP. */
4216 int spe_capable_cpu =
4217 (rs6000_cpu == PROCESSOR_PPC8540
4218 || rs6000_cpu == PROCESSOR_PPC8548);
4220 if (!global_options_set.x_rs6000_spe_abi)
4221 rs6000_spe_abi = spe_capable_cpu;
4223 if (!global_options_set.x_rs6000_spe)
4224 rs6000_spe = spe_capable_cpu;
4226 if (!global_options_set.x_rs6000_float_gprs)
4227 rs6000_float_gprs =
4228 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
4229 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
4230 : 0);
4233 if (global_options_set.x_rs6000_spe_abi
4234 && rs6000_spe_abi
4235 && !TARGET_SPE_ABI)
4236 error ("not configured for SPE ABI");
4238 if (global_options_set.x_rs6000_spe
4239 && rs6000_spe
4240 && !TARGET_SPE)
4241 error ("not configured for SPE instruction set");
4243 if (main_target_opt != NULL
4244 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
4245 || (main_target_opt->x_rs6000_spe != rs6000_spe)
4246 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
4247 error ("target attribute or pragma changes SPE ABI");
4249 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4250 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4251 || rs6000_cpu == PROCESSOR_PPCE5500)
4253 if (TARGET_ALTIVEC)
4254 error ("AltiVec not supported in this target");
4255 if (TARGET_SPE)
4256 error ("SPE not supported in this target");
4258 if (rs6000_cpu == PROCESSOR_PPCE6500)
4260 if (TARGET_SPE)
4261 error ("SPE not supported in this target");
4264 /* Disable Cell microcode if we are optimizing for the Cell
4265 and not optimizing for size. */
4266 if (rs6000_gen_cell_microcode == -1)
4267 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
4268 && !optimize_size);
4270 /* If we are optimizing big endian systems for space and it's OK to
4271 use instructions that would be microcoded on the Cell, use the
4272 load/store multiple and string instructions. */
4273 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
4274 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4275 | OPTION_MASK_STRING);
4277 /* Don't allow -mmultiple or -mstring on little endian systems
4278 unless the cpu is a 750, because the hardware doesn't support the
4279 instructions used in little endian mode, and using them causes an
4280 alignment trap. The 750 does not cause an alignment trap (except
4281 when the target is unaligned). */
4283 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4285 if (TARGET_MULTIPLE)
4287 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4288 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4289 warning (0, "-mmultiple is not supported on little endian systems");
4292 if (TARGET_STRING)
4294 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4295 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4296 warning (0, "-mstring is not supported on little endian systems");
4300 /* If little-endian, default to -mstrict-align on older processors.
4301 Testing for htm matches power8 and later. */
4302 if (!BYTES_BIG_ENDIAN
4303 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4304 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4306 /* -maltivec={le,be} implies -maltivec. */
4307 if (rs6000_altivec_element_order != 0)
4308 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4310 /* Disallow -maltivec=le in big endian mode for now. This is not
4311 known to be useful for anyone. */
4312 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4314 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4315 rs6000_altivec_element_order = 0;
4318 /* Add some warnings for VSX. */
4319 if (TARGET_VSX)
4321 const char *msg = NULL;
4322 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
4323 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4325 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4326 msg = N_("-mvsx requires hardware floating point");
4327 else
4329 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4330 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4333 else if (TARGET_PAIRED_FLOAT)
4334 msg = N_("-mvsx and -mpaired are incompatible");
4335 else if (TARGET_AVOID_XFORM > 0)
4336 msg = N_("-mvsx needs indexed addressing");
4337 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4338 & OPTION_MASK_ALTIVEC))
4340 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4341 msg = N_("-mvsx and -mno-altivec are incompatible");
4342 else
4343 msg = N_("-mno-altivec disables vsx");
4346 if (msg)
4348 warning (0, msg);
4349 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4350 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4354 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4355 the -mcpu setting to enable options that conflict. */
4356 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4357 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4358 | OPTION_MASK_ALTIVEC
4359 | OPTION_MASK_VSX)) != 0)
4360 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4361 | OPTION_MASK_DIRECT_MOVE)
4362 & ~rs6000_isa_flags_explicit);
4364 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4365 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4367 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4368 off all of the options that depend on those flags. */
4369 ignore_masks = rs6000_disable_incompatible_switches ();
4371 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4372 unless the user explicitly used the -mno-<option> to disable the code. */
4373 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4374 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0)
4375 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4376 else if (TARGET_P9_MINMAX)
4378 if (have_cpu)
4380 if (cpu_index == PROCESSOR_POWER9)
4382 /* Legacy behavior: allow -mcpu=power9 with certain
4383 capabilities explicitly disabled. */
4384 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4385 /* However, reject this automatic fix if certain
4386 capabilities required for TARGET_P9_MINMAX support
4387 have been explicitly disabled. */
4388 if (((OPTION_MASK_VSX | OPTION_MASK_UPPER_REGS_SF
4389 | OPTION_MASK_UPPER_REGS_DF) & rs6000_isa_flags)
4390 != (OPTION_MASK_VSX | OPTION_MASK_UPPER_REGS_SF
4391 | OPTION_MASK_UPPER_REGS_DF))
4392 error ("-mpower9-minmax incompatible with explicitly disabled options");
4394 else
4395 error ("Power9 target option is incompatible with -mcpu=<xxx> for "
4396 "<xxx> less than power9");
4398 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4399 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4400 & rs6000_isa_flags_explicit))
4401 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4402 were explicitly cleared. */
4403 error ("-mpower9-minmax incompatible with explicitly disabled options");
4404 else
4405 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4407 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4408 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4409 else if (TARGET_VSX)
4410 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4411 else if (TARGET_POPCNTD)
4412 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4413 else if (TARGET_DFP)
4414 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4415 else if (TARGET_CMPB)
4416 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4417 else if (TARGET_FPRND)
4418 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4419 else if (TARGET_POPCNTB)
4420 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4421 else if (TARGET_ALTIVEC)
4422 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4424 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4426 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4427 error ("-mcrypto requires -maltivec");
4428 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4431 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4433 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4434 error ("-mdirect-move requires -mvsx");
4435 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4438 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4440 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4441 error ("-mpower8-vector requires -maltivec");
4442 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4445 if (TARGET_P8_VECTOR && !TARGET_VSX)
4447 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4448 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4449 error ("-mpower8-vector requires -mvsx");
4450 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4452 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4453 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4454 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4456 else
4458 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4459 not explicit. */
4460 rs6000_isa_flags |= OPTION_MASK_VSX;
4461 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4465 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4467 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4468 error ("-mvsx-timode requires -mvsx");
4469 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4472 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4474 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4475 error ("-mhard-dfp requires -mhard-float");
4476 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4479 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4480 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4481 set the individual option. */
4482 if (TARGET_UPPER_REGS > 0)
4484 if (TARGET_VSX
4485 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4487 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4488 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4490 if (TARGET_VSX
4491 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4493 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4494 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4496 if (TARGET_P8_VECTOR
4497 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4499 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4500 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4503 else if (TARGET_UPPER_REGS == 0)
4505 if (TARGET_VSX
4506 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4508 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4509 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4511 if (TARGET_VSX
4512 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4514 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4515 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4517 if (TARGET_P8_VECTOR
4518 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4520 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4521 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4525 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4527 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4528 error ("-mupper-regs-df requires -mvsx");
4529 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4532 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4534 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4535 error ("-mupper-regs-di requires -mvsx");
4536 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4539 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4541 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4542 error ("-mupper-regs-sf requires -mpower8-vector");
4543 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4546 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4547 silently turn off quad memory mode. */
4548 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4550 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4551 warning (0, N_("-mquad-memory requires 64-bit mode"));
4553 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4554 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4556 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4557 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4560 /* Non-atomic quad memory load/store instructions are disabled for little
4561 endian, since the words are reversed, but atomic operations can still be
4562 done by swapping the words. */
4563 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4565 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4566 warning (0, N_("-mquad-memory is not available in little endian mode"));
4568 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4571 /* Assume if the user asked for normal quad memory instructions, they want
4572 the atomic versions as well, unless they explicitly told us not to use quad
4573 word atomic instructions. */
4574 if (TARGET_QUAD_MEMORY
4575 && !TARGET_QUAD_MEMORY_ATOMIC
4576 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4577 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
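/* The distinction, for reference (illustrative): -mquad-memory covers the
   plain lq/stq 16-byte load and store, while -mquad-memory-atomic covers
   the lqarx/stqcx. load-reserve/store-conditional pair used to build
   128-bit atomic operations.  */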
4579 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4580 generating power8 instructions. */
4581 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4582 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4583 & OPTION_MASK_P8_FUSION);
4585 /* Setting additional fusion flags turns on base fusion. */
4586 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4588 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4590 if (TARGET_P8_FUSION_SIGN)
4591 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4593 if (TARGET_TOC_FUSION)
4594 error ("-mtoc-fusion requires -mpower8-fusion");
4596 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4598 else
4599 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4602 /* Power9 fusion is a superset over power8 fusion. */
4603 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4605 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4607 /* We prefer to not mention undocumented options in
4608 error messages. However, if users have managed to select
4609 power9-fusion without selecting power8-fusion, they
4610 already know about undocumented flags. */
4611 error ("-mpower9-fusion requires -mpower8-fusion");
4612 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4614 else
4615 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4618 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4619 generating power9 instructions. */
4620 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4621 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4622 & OPTION_MASK_P9_FUSION);
4624 /* Power8 does not fuse sign-extended loads with the addis. If we are
4625 optimizing at high levels for speed, convert a sign-extending load into a
4626 zero-extending load plus an explicit sign extension. */
4627 if (TARGET_P8_FUSION
4628 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4629 && optimize_function_for_speed_p (cfun)
4630 && optimize >= 3)
4631 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
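/* A sketch of the transformation (PowerPC asm, illustrative):

	lha   r9,0(r3)		sign-extending load; power8 will not fuse it

   becomes

	lhz   r9,0(r3)		zero-extending load; fuses with an addis
	extsh r9,r9		explicit sign extension

   so the load half of an addis/load pair stays fusible.  */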
4633 /* TOC fusion requires 64-bit and medium/large code model. */
4634 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4636 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4637 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4638 warning (0, N_("-mtoc-fusion requires 64-bit"));
4641 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4643 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4644 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4645 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4648 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4649 model. */
4650 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4651 && (TARGET_CMODEL != CMODEL_SMALL)
4652 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4653 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4655 /* ISA 3.0 vector instructions include ISA 2.07. */
4656 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4658 /* We prefer to not mention undocumented options in
4659 error messages. However, if users have managed to select
4660 power9-vector without selecting power8-vector, they
4661 already know about undocumented flags. */
4662 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4663 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4664 error ("-mpower9-vector requires -mpower8-vector");
4665 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4667 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4668 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4669 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4671 else
4673 /* OPTION_MASK_P9_VECTOR is explicit and
4674 OPTION_MASK_P8_VECTOR is not explicit. */
4675 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4676 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4680 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4681 -mpower9-dform-vector. */
4682 if (TARGET_P9_DFORM_BOTH > 0)
4684 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4685 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4687 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4688 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4690 else if (TARGET_P9_DFORM_BOTH == 0)
4692 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4693 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4695 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4696 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4699 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4700 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4702 /* We prefer to not mention undocumented options in
4703 error messages. However, if users have managed to select
4704 power9-dform without selecting power9-vector, they
4705 already know about undocumented flags. */
4706 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4707 && (rs6000_isa_flags_explicit & (OPTION_MASK_P9_DFORM_SCALAR
4708 | OPTION_MASK_P9_DFORM_VECTOR)))
4709 error ("-mpower9-dform requires -mpower9-vector");
4710 else if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4712 rs6000_isa_flags &=
4713 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4714 rs6000_isa_flags_explicit |=
4715 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4717 else
4719 /* We know that OPTION_MASK_P9_VECTOR is not explicit and
4720 OPTION_MASK_P9_DFORM_SCALAR or OPTION_MASK_P9_DFORM_VECTOR
4721 may be explicit. */
4722 rs6000_isa_flags |= OPTION_MASK_P9_VECTOR;
4723 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4727 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR)
4728 && !TARGET_DIRECT_MOVE)
4730 /* We prefer to not mention undocumented options in
4731 error messages. However, if users have managed to select
4732 power9-dform without selecting direct-move, they
4733 already know about undocumented flags. */
4734 if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4735 && ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) ||
4736 (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR) ||
4737 (TARGET_P9_DFORM_BOTH == 1)))
4738 error ("-mpower9-dform, -mpower9-dform-vector, -mpower9-dform-scalar"
4739 " require -mdirect-move");
4740 else if ((rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE) == 0)
4742 rs6000_isa_flags |= OPTION_MASK_DIRECT_MOVE;
4743 rs6000_isa_flags_explicit |= OPTION_MASK_DIRECT_MOVE;
4745 else
4747 rs6000_isa_flags &=
4748 ~(OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4749 rs6000_isa_flags_explicit |=
4750 (OPTION_MASK_P9_DFORM_SCALAR | OPTION_MASK_P9_DFORM_VECTOR);
4754 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4756 /* We prefer to not mention undocumented options in
4757 error messages. However, if users have managed to select
4758 power9-dform without selecting upper-regs-df, they
4759 already know about undocumented flags. */
4760 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4761 error ("-mpower9-dform requires -mupper-regs-df");
4762 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4765 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4767 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4768 error ("-mpower9-dform requires -mupper-regs-sf");
4769 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4772 /* Enable LRA by default. */
4773 if ((rs6000_isa_flags_explicit & OPTION_MASK_LRA) == 0)
4774 rs6000_isa_flags |= OPTION_MASK_LRA;
4776 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4777 but do show up with -mno-lra. Given -mlra will become the default once
4778 PR 69847 is fixed, turn off the options with problems by default if
4779 -mno-lra was used, and warn if the user explicitly asked for the option.
4781 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4782 Enable -mvsx-timode by default if LRA and VSX. */
4783 if (!TARGET_LRA)
4785 if (TARGET_VSX_TIMODE)
4787 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4788 warning (0, "-mvsx-timode might need -mlra");
4790 else
4791 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4795 else
4797 if (TARGET_VSX && !TARGET_VSX_TIMODE
4798 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4799 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4802 /* Explicitly turn on -mallow-movmisalign if we have full ISA 2.07
4803 support. If we only have ISA 2.06 support, and the user did not specify
4804 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4805 but we don't enable the full vectorization support. */
4806 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4807 TARGET_ALLOW_MOVMISALIGN = 1;
4809 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4811 if (TARGET_ALLOW_MOVMISALIGN > 0
4812 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4813 error ("-mallow-movmisalign requires -mvsx");
4815 TARGET_ALLOW_MOVMISALIGN = 0;
4818 /* Determine when unaligned vector accesses are permitted, and when
4819 they are preferred over masked Altivec loads. Note that if
4820 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4821 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4822 not true. */
4823 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4825 if (!TARGET_VSX)
4827 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4828 error ("-mefficient-unaligned-vsx requires -mvsx");
4830 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4833 else if (!TARGET_ALLOW_MOVMISALIGN)
4835 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4836 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4838 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4842 /* Check whether we should allow small integers into VSX registers. We
4843 require direct move so that the register allocator does not have to
4844 bounce variables through memory to move them. SImode can be used on ISA
4845 2.07, while HImode and QImode require ISA 3.0. */
4846 if (TARGET_VSX_SMALL_INTEGER
4847 && (!TARGET_DIRECT_MOVE || !TARGET_P8_VECTOR || !TARGET_UPPER_REGS_DI))
4849 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_SMALL_INTEGER)
4850 error ("-mvsx-small-integer requires -mpower8-vector, "
4851 "-mupper-regs-di, and -mdirect-move");
4853 rs6000_isa_flags &= ~OPTION_MASK_VSX_SMALL_INTEGER;
4856 /* Set long double size before the IEEE 128-bit tests. */
4857 if (!global_options_set.x_rs6000_long_double_type_size)
4859 if (main_target_opt != NULL
4860 && (main_target_opt->x_rs6000_long_double_type_size
4861 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4862 error ("target attribute or pragma changes long double size");
4863 else
4864 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4867 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4868 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4869 pick up this default. */
4870 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4871 if (!global_options_set.x_rs6000_ieeequad)
4872 rs6000_ieeequad = 1;
4873 #endif
4875 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4876 systems, but don't enable the __float128 keyword. */
4877 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4878 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4879 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4880 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4882 /* IEEE 128-bit floating point requires VSX support. */
4883 if (!TARGET_VSX)
4885 if (TARGET_FLOAT128_KEYWORD)
4887 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4888 error ("-mfloat128 requires VSX support");
4890 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4891 | OPTION_MASK_FLOAT128_KEYWORD
4892 | OPTION_MASK_FLOAT128_HW);
4895 else if (TARGET_FLOAT128_TYPE)
4897 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4898 error ("-mfloat128-type requires VSX support");
4900 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4901 | OPTION_MASK_FLOAT128_KEYWORD
4902 | OPTION_MASK_FLOAT128_HW);
4906 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4907 128-bit floating point support to be enabled. */
4908 if (!TARGET_FLOAT128_TYPE)
4910 if (TARGET_FLOAT128_KEYWORD)
4912 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4914 error ("-mfloat128 requires -mfloat128-type");
4915 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4916 | OPTION_MASK_FLOAT128_KEYWORD
4917 | OPTION_MASK_FLOAT128_HW);
4919 else
4920 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4923 if (TARGET_FLOAT128_HW)
4925 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4927 error ("-mfloat128-hardware requires -mfloat128-type");
4928 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4930 else
4931 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4932 | OPTION_MASK_FLOAT128_KEYWORD
4933 | OPTION_MASK_FLOAT128_HW);
4937 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4938 -mfloat128-hardware by default. However, don't enable the __float128
4939 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4940 -mfloat128 option as well if it was not already set. */
4941 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4942 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4943 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4944 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4946 if (TARGET_FLOAT128_HW
4947 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4949 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4950 error ("-mfloat128-hardware requires full ISA 3.0 support");
4952 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4955 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4957 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4958 error ("-mfloat128-hardware requires -m64");
4960 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4963 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4964 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4965 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4966 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
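/* With the keyword enabled, user code can write (illustrative; assumes
   the usual GCC __float128 support):

	__float128 q = 1.0q;
	q = q * q + 0.5q;

   and -mfloat128-hardware maps such arithmetic onto the ISA 3.0 quad
   precision instructions instead of calling the soft-float routines.  */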
4968 /* Print the options after updating the defaults. */
4969 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4970 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4972 /* E500mc does "better" if we inline more aggressively. Respect the
4973 user's opinion, though. */
4974 if (rs6000_block_move_inline_limit == 0
4975 && (rs6000_cpu == PROCESSOR_PPCE500MC
4976 || rs6000_cpu == PROCESSOR_PPCE500MC64
4977 || rs6000_cpu == PROCESSOR_PPCE5500
4978 || rs6000_cpu == PROCESSOR_PPCE6500))
4979 rs6000_block_move_inline_limit = 128;
4981 /* store_one_arg depends on expand_block_move to handle at least the
4982 size of reg_parm_stack_space. */
4983 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4984 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4986 if (global_init_p)
4988 /* If the appropriate debug option is enabled, replace the target hooks
4989 with debug versions that call the real version and then print
4990 debugging information. */
4991 if (TARGET_DEBUG_COST)
4993 targetm.rtx_costs = rs6000_debug_rtx_costs;
4994 targetm.address_cost = rs6000_debug_address_cost;
4995 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4998 if (TARGET_DEBUG_ADDR)
5000 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
5001 targetm.legitimize_address = rs6000_debug_legitimize_address;
5002 rs6000_secondary_reload_class_ptr
5003 = rs6000_debug_secondary_reload_class;
5004 rs6000_secondary_memory_needed_ptr
5005 = rs6000_debug_secondary_memory_needed;
5006 rs6000_cannot_change_mode_class_ptr
5007 = rs6000_debug_cannot_change_mode_class;
5008 rs6000_preferred_reload_class_ptr
5009 = rs6000_debug_preferred_reload_class;
5010 rs6000_legitimize_reload_address_ptr
5011 = rs6000_debug_legitimize_reload_address;
5012 rs6000_mode_dependent_address_ptr
5013 = rs6000_debug_mode_dependent_address;
5016 if (rs6000_veclibabi_name)
5018 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
5019 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
5020 else
5022 error ("unknown vectorization library ABI type (%s) for "
5023 "-mveclibabi= switch", rs6000_veclibabi_name);
5024 ret = false;
5029 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
5030 target attribute or pragma which automatically enables both options,
5031 unless the altivec ABI was set. This is set by default for 64-bit, but
5032 not for 32-bit. */
5033 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
5034 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
5035 | OPTION_MASK_FLOAT128_TYPE
5036 | OPTION_MASK_FLOAT128_KEYWORD)
5037 & ~rs6000_isa_flags_explicit);
5039 /* Enable Altivec ABI for AIX -maltivec. */
5040 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
5042 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
5043 error ("target attribute or pragma changes AltiVec ABI");
5044 else
5045 rs6000_altivec_abi = 1;
5048 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
5049 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
5050 be explicitly overridden in either case. */
5051 if (TARGET_ELF)
5053 if (!global_options_set.x_rs6000_altivec_abi
5054 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
5056 if (main_target_opt != NULL &&
5057 !main_target_opt->x_rs6000_altivec_abi)
5058 error ("target attribute or pragma changes AltiVec ABI");
5059 else
5060 rs6000_altivec_abi = 1;
5064 /* Set the Darwin64 ABI as default for 64-bit Darwin.
5065 So far, the only darwin64 targets are also MACH-O. */
5066 if (TARGET_MACHO
5067 && DEFAULT_ABI == ABI_DARWIN
5068 && TARGET_64BIT)
5070 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
5071 error ("target attribute or pragma changes darwin64 ABI");
5072 else
5074 rs6000_darwin64_abi = 1;
5075 /* Default to natural alignment, for better performance. */
5076 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
5080 /* Place FP constants in the constant pool instead of TOC
5081 if section anchors enabled. */
5082 if (flag_section_anchors
5083 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
5084 TARGET_NO_FP_IN_TOC = 1;
5086 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
5087 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
5089 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5090 SUBTARGET_OVERRIDE_OPTIONS;
5091 #endif
5092 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
5093 SUBSUBTARGET_OVERRIDE_OPTIONS;
5094 #endif
5095 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
5096 SUB3TARGET_OVERRIDE_OPTIONS;
5097 #endif
5099 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
5100 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
5102 /* For the E500 family of cores, reset the single/double FP flags to let us
5103 check that they remain constant across attributes or pragmas. Also,
5104 clear a possible request for string instructions, which are not supported
5105 and which we might have silently enabled above for -Os.
5107 For other families, clear ISEL in case it was set implicitly.
5110 switch (rs6000_cpu)
5112 case PROCESSOR_PPC8540:
5113 case PROCESSOR_PPC8548:
5114 case PROCESSOR_PPCE500MC:
5115 case PROCESSOR_PPCE500MC64:
5116 case PROCESSOR_PPCE5500:
5117 case PROCESSOR_PPCE6500:
5119 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
5120 rs6000_double_float = TARGET_E500_DOUBLE;
5122 rs6000_isa_flags &= ~OPTION_MASK_STRING;
5124 break;
5126 default:
5128 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
5129 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
5131 break;
5134 if (main_target_opt)
5136 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
5137 error ("target attribute or pragma changes single precision floating "
5138 "point");
5139 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
5140 error ("target attribute or pragma changes double precision floating "
5141 "point");
5144 /* Detect invalid option combinations with E500. */
5145 CHECK_E500_OPTIONS;
5147 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
5148 && rs6000_cpu != PROCESSOR_POWER5
5149 && rs6000_cpu != PROCESSOR_POWER6
5150 && rs6000_cpu != PROCESSOR_POWER7
5151 && rs6000_cpu != PROCESSOR_POWER8
5152 && rs6000_cpu != PROCESSOR_POWER9
5153 && rs6000_cpu != PROCESSOR_PPCA2
5154 && rs6000_cpu != PROCESSOR_CELL
5155 && rs6000_cpu != PROCESSOR_PPC476);
5156 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
5157 || rs6000_cpu == PROCESSOR_POWER5
5158 || rs6000_cpu == PROCESSOR_POWER7
5159 || rs6000_cpu == PROCESSOR_POWER8);
5160 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
5161 || rs6000_cpu == PROCESSOR_POWER5
5162 || rs6000_cpu == PROCESSOR_POWER6
5163 || rs6000_cpu == PROCESSOR_POWER7
5164 || rs6000_cpu == PROCESSOR_POWER8
5165 || rs6000_cpu == PROCESSOR_POWER9
5166 || rs6000_cpu == PROCESSOR_PPCE500MC
5167 || rs6000_cpu == PROCESSOR_PPCE500MC64
5168 || rs6000_cpu == PROCESSOR_PPCE5500
5169 || rs6000_cpu == PROCESSOR_PPCE6500);
5171 /* Allow debug switches to override the above settings. These are set to -1
5172 in powerpcspe.opt to indicate the user hasn't directly set the switch. */
5173 if (TARGET_ALWAYS_HINT >= 0)
5174 rs6000_always_hint = TARGET_ALWAYS_HINT;
5176 if (TARGET_SCHED_GROUPS >= 0)
5177 rs6000_sched_groups = TARGET_SCHED_GROUPS;
5179 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
5180 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
5182 rs6000_sched_restricted_insns_priority
5183 = (rs6000_sched_groups ? 1 : 0);
5185 /* Handle -msched-costly-dep option. */
5186 rs6000_sched_costly_dep
5187 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
5189 if (rs6000_sched_costly_dep_str)
5191 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
5192 rs6000_sched_costly_dep = no_dep_costly;
5193 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
5194 rs6000_sched_costly_dep = all_deps_costly;
5195 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
5196 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
5197 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
5198 rs6000_sched_costly_dep = store_to_load_dep_costly;
5199 else
5200 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
5201 atoi (rs6000_sched_costly_dep_str));
5204 /* Handle -minsert-sched-nops option. */
5205 rs6000_sched_insert_nops
5206 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
5208 if (rs6000_sched_insert_nops_str)
5210 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
5211 rs6000_sched_insert_nops = sched_finish_none;
5212 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
5213 rs6000_sched_insert_nops = sched_finish_pad_groups;
5214 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
5215 rs6000_sched_insert_nops = sched_finish_regroup_exact;
5216 else
5217 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
5218 atoi (rs6000_sched_insert_nops_str));
5221 /* Handle stack protector */
5222 if (!global_options_set.x_rs6000_stack_protector_guard)
5223 #ifdef TARGET_THREAD_SSP_OFFSET
5224 rs6000_stack_protector_guard = SSP_TLS;
5225 #else
5226 rs6000_stack_protector_guard = SSP_GLOBAL;
5227 #endif
5229 #ifdef TARGET_THREAD_SSP_OFFSET
5230 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
5231 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
5232 #endif
5234 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
5236 char *endp;
5237 const char *str = rs6000_stack_protector_guard_offset_str;
5239 errno = 0;
5240 long offset = strtol (str, &endp, 0);
5241 if (!*str || *endp || errno)
5242 error ("%qs is not a valid number "
5243 "in -mstack-protector-guard-offset=", str);
5245 if (!IN_RANGE (offset, -0x8000, 0x7fff)
5246 || (TARGET_64BIT && (offset & 3)))
5247 error ("%qs is not a valid offset "
5248 "in -mstack-protector-guard-offset=", str);
5250 rs6000_stack_protector_guard_offset = offset;
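/* A minimal standalone sketch of the offset validation above;
   guard_offset_ok is a hypothetical helper, not part of GCC.  For example
   "0x7ffc" passes for -m64 while "0x7ffe" fails there, because the 64-bit
   DS-form loads and stores need an offset that is a multiple of 4.  */
#if 0
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool
guard_offset_ok (const char *str, bool is_64bit)
{
  char *endp;
  errno = 0;
  long offset = strtol (str, &endp, 0);
  if (!*str || *endp || errno)
    return false;		/* not a valid number at all */
  if (offset < -0x8000 || offset > 0x7fff)
    return false;		/* must fit a signed 16-bit displacement */
  if (is_64bit && (offset & 3))
    return false;		/* DS-form accesses need a multiple of 4 */
  return true;
}
#endif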
5253 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
5255 const char *str = rs6000_stack_protector_guard_reg_str;
5256 int reg = decode_reg_name (str);
5258 if (!IN_RANGE (reg, 1, 31))
5259 error ("%qs is not a valid base register "
5260 "in -mstack-protector-guard-reg=", str);
5262 rs6000_stack_protector_guard_reg = reg;
5265 if (rs6000_stack_protector_guard == SSP_TLS
5266 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
5267 error ("-mstack-protector-guard=tls needs a valid base register");
5269 if (global_init_p)
5271 #ifdef TARGET_REGNAMES
5272 /* If the user desires alternate register names, copy in the
5273 alternate names now. */
5274 if (TARGET_REGNAMES)
5275 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
5276 #endif
5278 /* Set aix_struct_return last, after the ABI is determined.
5279 If -maix-struct-return or -msvr4-struct-return was explicitly
5280 used, don't override with the ABI default. */
5281 if (!global_options_set.x_aix_struct_return)
5282 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
5284 #if 0
5285 /* IBM XL compiler defaults to unsigned bitfields. */
5286 if (TARGET_XL_COMPAT)
5287 flag_signed_bitfields = 0;
5288 #endif
5290 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
5291 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
5293 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5295 /* We can only guarantee the availability of DI pseudo-ops when
5296 assembling for 64-bit targets. */
5297 if (!TARGET_64BIT)
5299 targetm.asm_out.aligned_op.di = NULL;
5300 targetm.asm_out.unaligned_op.di = NULL;
5304 /* Set branch target alignment, if not optimizing for size. */
5305 if (!optimize_size)
5307 /* Cell wants to be 8-byte aligned for dual issue.  Titan wants to be
5308 8-byte aligned to avoid misprediction by the branch predictor. */
5309 if (rs6000_cpu == PROCESSOR_TITAN
5310 || rs6000_cpu == PROCESSOR_CELL)
5312 if (align_functions <= 0)
5313 align_functions = 8;
5314 if (align_jumps <= 0)
5315 align_jumps = 8;
5316 if (align_loops <= 0)
5317 align_loops = 8;
5319 if (rs6000_align_branch_targets)
5321 if (align_functions <= 0)
5322 align_functions = 16;
5323 if (align_jumps <= 0)
5324 align_jumps = 16;
5325 if (align_loops <= 0)
5327 can_override_loop_align = 1;
5328 align_loops = 16;
5331 if (align_jumps_max_skip <= 0)
5332 align_jumps_max_skip = 15;
5333 if (align_loops_max_skip <= 0)
5334 align_loops_max_skip = 15;
5337 /* Arrange to save and restore machine status around nested functions. */
5338 init_machine_status = rs6000_init_machine_status;
5340 /* We should always be splitting complex arguments, but we can't break
5341 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5342 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5343 targetm.calls.split_complex_arg = NULL;
5345 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5346 if (DEFAULT_ABI == ABI_AIX)
5347 targetm.calls.custom_function_descriptors = 0;
5350 /* Initialize rs6000_cost with the appropriate target costs. */
5351 if (optimize_size)
5352 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5353 else
5354 switch (rs6000_cpu)
5356 case PROCESSOR_RS64A:
5357 rs6000_cost = &rs64a_cost;
5358 break;
5360 case PROCESSOR_MPCCORE:
5361 rs6000_cost = &mpccore_cost;
5362 break;
5364 case PROCESSOR_PPC403:
5365 rs6000_cost = &ppc403_cost;
5366 break;
5368 case PROCESSOR_PPC405:
5369 rs6000_cost = &ppc405_cost;
5370 break;
5372 case PROCESSOR_PPC440:
5373 rs6000_cost = &ppc440_cost;
5374 break;
5376 case PROCESSOR_PPC476:
5377 rs6000_cost = &ppc476_cost;
5378 break;
5380 case PROCESSOR_PPC601:
5381 rs6000_cost = &ppc601_cost;
5382 break;
5384 case PROCESSOR_PPC603:
5385 rs6000_cost = &ppc603_cost;
5386 break;
5388 case PROCESSOR_PPC604:
5389 rs6000_cost = &ppc604_cost;
5390 break;
5392 case PROCESSOR_PPC604e:
5393 rs6000_cost = &ppc604e_cost;
5394 break;
5396 case PROCESSOR_PPC620:
5397 rs6000_cost = &ppc620_cost;
5398 break;
5400 case PROCESSOR_PPC630:
5401 rs6000_cost = &ppc630_cost;
5402 break;
5404 case PROCESSOR_CELL:
5405 rs6000_cost = &ppccell_cost;
5406 break;
5408 case PROCESSOR_PPC750:
5409 case PROCESSOR_PPC7400:
5410 rs6000_cost = &ppc750_cost;
5411 break;
5413 case PROCESSOR_PPC7450:
5414 rs6000_cost = &ppc7450_cost;
5415 break;
5417 case PROCESSOR_PPC8540:
5418 case PROCESSOR_PPC8548:
5419 rs6000_cost = &ppc8540_cost;
5420 break;
5422 case PROCESSOR_PPCE300C2:
5423 case PROCESSOR_PPCE300C3:
5424 rs6000_cost = &ppce300c2c3_cost;
5425 break;
5427 case PROCESSOR_PPCE500MC:
5428 rs6000_cost = &ppce500mc_cost;
5429 break;
5431 case PROCESSOR_PPCE500MC64:
5432 rs6000_cost = &ppce500mc64_cost;
5433 break;
5435 case PROCESSOR_PPCE5500:
5436 rs6000_cost = &ppce5500_cost;
5437 break;
5439 case PROCESSOR_PPCE6500:
5440 rs6000_cost = &ppce6500_cost;
5441 break;
5443 case PROCESSOR_TITAN:
5444 rs6000_cost = &titan_cost;
5445 break;
5447 case PROCESSOR_POWER4:
5448 case PROCESSOR_POWER5:
5449 rs6000_cost = &power4_cost;
5450 break;
5452 case PROCESSOR_POWER6:
5453 rs6000_cost = &power6_cost;
5454 break;
5456 case PROCESSOR_POWER7:
5457 rs6000_cost = &power7_cost;
5458 break;
5460 case PROCESSOR_POWER8:
5461 rs6000_cost = &power8_cost;
5462 break;
5464 case PROCESSOR_POWER9:
5465 rs6000_cost = &power9_cost;
5466 break;
5468 case PROCESSOR_PPCA2:
5469 rs6000_cost = &ppca2_cost;
5470 break;
5472 default:
5473 gcc_unreachable ();
5476 if (global_init_p)
5478 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5479 rs6000_cost->simultaneous_prefetches,
5480 global_options.x_param_values,
5481 global_options_set.x_param_values);
5482 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5483 global_options.x_param_values,
5484 global_options_set.x_param_values);
5485 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5486 rs6000_cost->cache_line_size,
5487 global_options.x_param_values,
5488 global_options_set.x_param_values);
5489 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5490 global_options.x_param_values,
5491 global_options_set.x_param_values);
5493 /* Increase loop peeling limits based on performance analysis. */
5494 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5495 global_options.x_param_values,
5496 global_options_set.x_param_values);
5497 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5498 global_options.x_param_values,
5499 global_options_set.x_param_values);
5501 /* Use the 'model' -fsched-pressure algorithm by default. */
5502 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5503 SCHED_PRESSURE_MODEL,
5504 global_options.x_param_values,
5505 global_options_set.x_param_values);
5507 /* If using typedef char *va_list, signal that
5508 __builtin_va_start (&ap, 0) can be optimized to
5509 ap = __builtin_next_arg (0). */
5510 if (DEFAULT_ABI != ABI_V4)
5511 targetm.expand_builtin_va_start = NULL;
5514 /* Set up single/double float flags.
5515 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5516 then set both flags. */
5517 if (TARGET_HARD_FLOAT && TARGET_FPRS
5518 && rs6000_single_float == 0 && rs6000_double_float == 0)
5519 rs6000_single_float = rs6000_double_float = 1;
5521 /* If not explicitly specified via option, decide whether to generate indexed
5522 load/store instructions. A value of -1 indicates that the
5523 initial value of this variable has not been overwritten. During
5524 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5525 if (TARGET_AVOID_XFORM == -1)
5526 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5527 DERAT mispredict penalty.  However, the LVE and STVE altivec instructions
5528 need indexed accesses and the type used is the scalar type of the element
5529 being loaded or stored. */
5530 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5531 && !TARGET_ALTIVEC);
5533 /* Set the -mrecip options. */
5534 if (rs6000_recip_name)
5536 char *p = ASTRDUP (rs6000_recip_name);
5537 char *q;
5538 unsigned int mask, i;
5539 bool invert;
5541 while ((q = strtok (p, ",")) != NULL)
5543 p = NULL;
5544 if (*q == '!')
5546 invert = true;
5547 q++;
5549 else
5550 invert = false;
5552 if (!strcmp (q, "default"))
5553 mask = ((TARGET_RECIP_PRECISION)
5554 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5555 else
5557 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5558 if (!strcmp (q, recip_options[i].string))
5560 mask = recip_options[i].mask;
5561 break;
5564 if (i == ARRAY_SIZE (recip_options))
5566 error ("unknown option for -mrecip=%s", q);
5567 invert = false;
5568 mask = 0;
5569 ret = false;
5573 if (invert)
5574 rs6000_recip_control &= ~mask;
5575 else
5576 rs6000_recip_control |= mask;
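/* A minimal standalone sketch of the -mrecip parsing loop above, with an
   invented two-entry option table standing in for recip_options[]; the mask
   values and names here are for illustration only.  E.g. "div,!rsqrt" sets
   the division bit and then clears the reciprocal-square-root bit.  */
#if 0
#include <stdbool.h>
#include <string.h>

static unsigned
sketch_mask_for (const char *q)
{
  if (!strcmp (q, "div"))
    return 0x1;			/* invented mask values */
  if (!strcmp (q, "rsqrt"))
    return 0x2;
  return 0;
}

static unsigned
sketch_parse_recip (char *p, unsigned control)
{
  char *q;
  while ((q = strtok (p, ",")) != NULL)
    {
      p = NULL;			/* keep tokenizing the same string */
      bool invert = (*q == '!');
      if (invert)
	q++;			/* "!rsqrt" clears instead of sets */
      unsigned mask = sketch_mask_for (q);
      if (invert)
	control &= ~mask;
      else
	control |= mask;
    }
  return control;
}
#endif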
5580 /* Set the builtin mask of the various options used that could affect which
5581 builtins were used. In the past we used target_flags, but we've run out
5582 of bits, and some options like SPE and PAIRED are no longer in
5583 target_flags. */
5584 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5585 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5586 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5587 rs6000_builtin_mask);
5589 /* Initialize all of the registers. */
5590 rs6000_init_hard_regno_mode_ok (global_init_p);
5592 /* Save the initial options in case the user uses function-specific options.  */
5593 if (global_init_p)
5594 target_option_default_node = target_option_current_node
5595 = build_target_option_node (&global_options);
5597 /* If not explicitly specified via option, decide whether to generate the
5598 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5599 if (TARGET_LINK_STACK == -1)
5600 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5602 return ret;
5605 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5606 define the target cpu type. */
5608 static void
5609 rs6000_option_override (void)
5611 (void) rs6000_option_override_internal (true);
5615 /* Implement targetm.vectorize.builtin_mask_for_load. */
5616 static tree
5617 rs6000_builtin_mask_for_load (void)
5619 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5620 if ((TARGET_ALTIVEC && !TARGET_VSX)
5621 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5622 return altivec_builtin_mask_for_load;
5623 else
5624 return 0;
5627 /* Implement LOOP_ALIGN. */
5628 int
5629 rs6000_loop_align (rtx label)
5631 basic_block bb;
5632 int ninsns;
5634 /* Don't override loop alignment if -falign-loops was specified. */
5635 if (!can_override_loop_align)
5636 return align_loops_log;
5638 bb = BLOCK_FOR_INSN (label);
5639 ninsns = num_loop_insns(bb->loop_father);
5641 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5642 if (ninsns > 4 && ninsns <= 8
5643 && (rs6000_cpu == PROCESSOR_POWER4
5644 || rs6000_cpu == PROCESSOR_POWER5
5645 || rs6000_cpu == PROCESSOR_POWER6
5646 || rs6000_cpu == PROCESSOR_POWER7
5647 || rs6000_cpu == PROCESSOR_POWER8
5648 || rs6000_cpu == PROCESSOR_POWER9))
5649 return 5;
5650 else
5651 return align_loops_log;
5654 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5655 static int
5656 rs6000_loop_align_max_skip (rtx_insn *label)
5658 return (1 << rs6000_loop_align (label)) - 1;
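/* Worked example for the two hooks above: a small hot loop on a recent
   POWER cpu gets alignment 2**5 = 32 bytes, and the assembler may then
   insert up to (1 << 5) - 1 = 31 bytes of padding to reach that boundary.
   A standalone rendering of the relation:  */
#if 0
static int
sketch_max_skip (int align_log)
{
  return (1 << align_log) - 1;	/* align_log 5 -> up to 31 pad bytes */
}
#endif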
5661 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5662 after applying N iterations.  This routine does not determine
5663 how many iterations are required to reach the desired alignment. */
5665 static bool
5666 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5668 if (is_packed)
5669 return false;
5671 if (TARGET_32BIT)
5673 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5674 return true;
5676 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5677 return true;
5679 return false;
5681 else
5683 if (TARGET_MACHO)
5684 return false;
5686 /* Assume that all other types are naturally aligned.  CHECKME! */
5687 return true;
5691 /* Return true if the vector misalignment factor is supported by the
5692 target. */
5693 static bool
5694 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5695 const_tree type,
5696 int misalignment,
5697 bool is_packed)
5699 if (TARGET_VSX)
5701 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5702 return true;
5704 /* Return false if the movmisalign pattern is not supported for this mode. */
5705 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5706 return false;
5708 if (misalignment == -1)
5710 /* Misalignment factor is unknown at compile time but we know
5711 it's word aligned. */
5712 if (rs6000_vector_alignment_reachable (type, is_packed))
5714 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5716 if (element_size == 64 || element_size == 32)
5717 return true;
5720 return false;
5723 /* VSX supports word-aligned vectors. */
5724 if (misalignment % 4 == 0)
5725 return true;
5727 return false;
5730 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5731 static int
5732 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5733 tree vectype, int misalign)
5735 unsigned elements;
5736 tree elem_type;
5738 switch (type_of_cost)
5740 case scalar_stmt:
5741 case scalar_load:
5742 case scalar_store:
5743 case vector_stmt:
5744 case vector_load:
5745 case vector_store:
5746 case vec_to_scalar:
5747 case scalar_to_vec:
5748 case cond_branch_not_taken:
5749 return 1;
5751 case vec_perm:
5752 if (TARGET_VSX)
5753 return 3;
5754 else
5755 return 1;
5757 case vec_promote_demote:
5758 if (TARGET_VSX)
5759 return 4;
5760 else
5761 return 1;
5763 case cond_branch_taken:
5764 return 3;
5766 case unaligned_load:
5767 if (TARGET_P9_VECTOR)
5768 return 3;
5770 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5771 return 1;
5773 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5775 elements = TYPE_VECTOR_SUBPARTS (vectype);
5776 if (elements == 2)
5777 /* Double word aligned. */
5778 return 2;
5780 if (elements == 4)
5782 switch (misalign)
5784 case 8:
5785 /* Double word aligned. */
5786 return 2;
5788 case -1:
5789 /* Unknown misalignment. */
5790 case 4:
5791 case 12:
5792 /* Word aligned. */
5793 return 22;
5795 default:
5796 gcc_unreachable ();
5801 if (TARGET_ALTIVEC)
5802 /* Misaligned loads are not supported. */
5803 gcc_unreachable ();
5805 return 2;
5807 case unaligned_store:
5808 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5809 return 1;
5811 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5813 elements = TYPE_VECTOR_SUBPARTS (vectype);
5814 if (elements == 2)
5815 /* Double word aligned. */
5816 return 2;
5818 if (elements == 4)
5820 switch (misalign)
5822 case 8:
5823 /* Double word aligned. */
5824 return 2;
5826 case -1:
5827 /* Unknown misalignment. */
5828 case 4:
5829 case 12:
5830 /* Word aligned. */
5831 return 23;
5833 default:
5834 gcc_unreachable ();
5839 if (TARGET_ALTIVEC)
5840 /* Misaligned stores are not supported. */
5841 gcc_unreachable ();
5843 return 2;
5845 case vec_construct:
5846 /* This is a rough approximation assuming non-constant elements
5847 constructed into a vector via element insertion. FIXME:
5848 vec_construct is not granular enough for uniformly good
5849 decisions. If the initialization is a splat, this is
5850 cheaper than we estimate. Improve this someday. */
5851 elem_type = TREE_TYPE (vectype);
5852 /* 32-bit floats are loaded into registers as double
5853 precision, so we need 2 permutes, 2 converts, and 1 merge
5854 to construct a vector of short floats from them. */
5855 if (SCALAR_FLOAT_TYPE_P (elem_type)
5856 && TYPE_PRECISION (elem_type) == 32)
5857 return 5;
5858 /* On POWER9, integer vector types are built up in GPRs and then
5859 use a direct move (2 cycles). For POWER8 this is even worse,
5860 as we need two direct moves and a merge, and the direct moves
5861 are five cycles. */
5862 else if (INTEGRAL_TYPE_P (elem_type))
5864 if (TARGET_P9_VECTOR)
5865 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5866 else
5867 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 11;
5869 else
5870 /* V2DFmode doesn't need a direct move. */
5871 return 2;
5873 default:
5874 gcc_unreachable ();
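/* A standalone rendering of the unaligned_load leg above for a 4-element
   VSX vector.  The jump from 2 to 22 for merely word-aligned accesses is
   what pushes the vectorizer toward peeling for alignment on pre-POWER8
   VSX targets; the sketch only mirrors the misalign switch.  */
#if 0
static int
sketch_v4_unaligned_load_cost (int misalign)
{
  switch (misalign)
    {
    case 8:			/* doubleword aligned */
      return 2;
    case -1:			/* unknown at compile time */
    case 4:
    case 12:			/* word aligned */
      return 22;
    default:
      return -1;		/* other values do not reach this leg */
    }
}
#endif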
5878 /* Implement targetm.vectorize.preferred_simd_mode. */
5880 static machine_mode
5881 rs6000_preferred_simd_mode (machine_mode mode)
5883 if (TARGET_VSX)
5884 switch (mode)
5886 case DFmode:
5887 return V2DFmode;
5888 default:;
5890 if (TARGET_ALTIVEC || TARGET_VSX)
5891 switch (mode)
5893 case SFmode:
5894 return V4SFmode;
5895 case TImode:
5896 return V1TImode;
5897 case DImode:
5898 return V2DImode;
5899 case SImode:
5900 return V4SImode;
5901 case HImode:
5902 return V8HImode;
5903 case QImode:
5904 return V16QImode;
5905 default:;
5907 if (TARGET_SPE)
5908 switch (mode)
5910 case SFmode:
5911 return V2SFmode;
5912 case SImode:
5913 return V2SImode;
5914 default:;
5916 if (TARGET_PAIRED_FLOAT
5917 && mode == SFmode)
5918 return V2SFmode;
5919 return word_mode;
5922 typedef struct _rs6000_cost_data
5924 struct loop *loop_info;
5925 unsigned cost[3];
5926 } rs6000_cost_data;
5928 /* Test for likely overcommitment of vector hardware resources. If a
5929 loop iteration is relatively large, and too large a percentage of
5930 instructions in the loop are vectorized, the cost model may not
5931 adequately reflect delays from unavailable vector resources.
5932 Penalize the loop body cost for this case. */
5934 static void
5935 rs6000_density_test (rs6000_cost_data *data)
5937 const int DENSITY_PCT_THRESHOLD = 85;
5938 const int DENSITY_SIZE_THRESHOLD = 70;
5939 const int DENSITY_PENALTY = 10;
5940 struct loop *loop = data->loop_info;
5941 basic_block *bbs = get_loop_body (loop);
5942 int nbbs = loop->num_nodes;
5943 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5944 int i, density_pct;
5946 for (i = 0; i < nbbs; i++)
5948 basic_block bb = bbs[i];
5949 gimple_stmt_iterator gsi;
5951 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5953 gimple *stmt = gsi_stmt (gsi);
5954 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5956 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5957 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5958 not_vec_cost++;
5962 free (bbs);
5963 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5965 if (density_pct > DENSITY_PCT_THRESHOLD
5966 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5968 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5969 if (dump_enabled_p ())
5970 dump_printf_loc (MSG_NOTE, vect_location,
5971 "density %d%%, cost %d exceeds threshold, penalizing "
5972 "loop body cost by %d%%", density_pct,
5973 vec_cost + not_vec_cost, DENSITY_PENALTY);
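/* Worked example for the density test above: with vec_cost = 90 and
   not_vec_cost = 10, density_pct = 9000 / 100 = 90 > 85 and the body size
   100 > 70, so the body cost rises from 90 to 90 * 110 / 100 = 99.  A
   standalone sketch with the thresholds written out:  */
#if 0
static int
sketch_penalized_body_cost (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;	/* 10% penalty */
  return vec_cost;
}
#endif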
5977 /* Implement targetm.vectorize.init_cost. */
5979 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5980 instruction is needed by the vectorization. */
5981 static bool rs6000_vect_nonmem;
5983 static void *
5984 rs6000_init_cost (struct loop *loop_info)
5986 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5987 data->loop_info = loop_info;
5988 data->cost[vect_prologue] = 0;
5989 data->cost[vect_body] = 0;
5990 data->cost[vect_epilogue] = 0;
5991 rs6000_vect_nonmem = false;
5992 return data;
5995 /* Implement targetm.vectorize.add_stmt_cost. */
5997 static unsigned
5998 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5999 struct _stmt_vec_info *stmt_info, int misalign,
6000 enum vect_cost_model_location where)
6002 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
6003 unsigned retval = 0;
6005 if (flag_vect_cost_model)
6007 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
6008 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
6009 misalign);
6010 /* Statements in an inner loop relative to the loop being
6011 vectorized are weighted more heavily. The value here is
6012 arbitrary and could potentially be improved with analysis. */
6013 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
6014 count *= 50; /* FIXME. */
6016 retval = (unsigned) (count * stmt_cost);
6017 cost_data->cost[where] += retval;
6019 /* Check whether we're doing something other than just a copy loop.
6020 Not all such loops may be profitably vectorized; see
6021 rs6000_finish_cost. */
6022 if ((kind == vec_to_scalar || kind == vec_perm
6023 || kind == vec_promote_demote || kind == vec_construct
6024 || kind == scalar_to_vec)
6025 || (where == vect_body && kind == vector_stmt))
6026 rs6000_vect_nonmem = true;
6029 return retval;
6032 /* Implement targetm.vectorize.finish_cost. */
6034 static void
6035 rs6000_finish_cost (void *data, unsigned *prologue_cost,
6036 unsigned *body_cost, unsigned *epilogue_cost)
6038 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
6040 if (cost_data->loop_info)
6041 rs6000_density_test (cost_data);
6043 /* Don't vectorize minimum-vectorization-factor, simple copy loops
6044 that require versioning for any reason. The vectorization is at
6045 best a wash inside the loop, and the versioning checks make
6046 profitability highly unlikely and potentially quite harmful. */
6047 if (cost_data->loop_info)
6049 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
6050 if (!rs6000_vect_nonmem
6051 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
6052 && LOOP_REQUIRES_VERSIONING (vec_info))
6053 cost_data->cost[vect_body] += 10000;
6056 *prologue_cost = cost_data->cost[vect_prologue];
6057 *body_cost = cost_data->cost[vect_body];
6058 *epilogue_cost = cost_data->cost[vect_epilogue];
6061 /* Implement targetm.vectorize.destroy_cost_data. */
6063 static void
6064 rs6000_destroy_cost_data (void *data)
6066 free (data);
6069 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
6070 library with vectorized intrinsics. */
6072 static tree
6073 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
6074 tree type_in)
6076 char name[32];
6077 const char *suffix = NULL;
6078 tree fntype, new_fndecl, bdecl = NULL_TREE;
6079 int n_args = 1;
6080 const char *bname;
6081 machine_mode el_mode, in_mode;
6082 int n, in_n;
6084 /* Libmass is suitable for unsafe math only, as it does not correctly support
6085 parts of IEEE with the required precision such as denormals. Only support
6086 it if we have VSX to use the simd d2 or f4 functions.
6087 XXX: Add variable length support. */
6088 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
6089 return NULL_TREE;
6091 el_mode = TYPE_MODE (TREE_TYPE (type_out));
6092 n = TYPE_VECTOR_SUBPARTS (type_out);
6093 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6094 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6095 if (el_mode != in_mode
6096 || n != in_n)
6097 return NULL_TREE;
6099 switch (fn)
6101 CASE_CFN_ATAN2:
6102 CASE_CFN_HYPOT:
6103 CASE_CFN_POW:
6104 n_args = 2;
6105 gcc_fallthrough ();
6107 CASE_CFN_ACOS:
6108 CASE_CFN_ACOSH:
6109 CASE_CFN_ASIN:
6110 CASE_CFN_ASINH:
6111 CASE_CFN_ATAN:
6112 CASE_CFN_ATANH:
6113 CASE_CFN_CBRT:
6114 CASE_CFN_COS:
6115 CASE_CFN_COSH:
6116 CASE_CFN_ERF:
6117 CASE_CFN_ERFC:
6118 CASE_CFN_EXP2:
6119 CASE_CFN_EXP:
6120 CASE_CFN_EXPM1:
6121 CASE_CFN_LGAMMA:
6122 CASE_CFN_LOG10:
6123 CASE_CFN_LOG1P:
6124 CASE_CFN_LOG2:
6125 CASE_CFN_LOG:
6126 CASE_CFN_SIN:
6127 CASE_CFN_SINH:
6128 CASE_CFN_SQRT:
6129 CASE_CFN_TAN:
6130 CASE_CFN_TANH:
6131 if (el_mode == DFmode && n == 2)
6133 bdecl = mathfn_built_in (double_type_node, fn);
6134 suffix = "d2"; /* pow -> powd2 */
6136 else if (el_mode == SFmode && n == 4)
6138 bdecl = mathfn_built_in (float_type_node, fn);
6139 suffix = "4"; /* powf -> powf4 */
6141 else
6142 return NULL_TREE;
6143 if (!bdecl)
6144 return NULL_TREE;
6145 break;
6147 default:
6148 return NULL_TREE;
6151 gcc_assert (suffix != NULL);
6152 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
6153 if (!bname)
6154 return NULL_TREE;
6156 strcpy (name, bname + sizeof ("__builtin_") - 1);
6157 strcat (name, suffix);
6159 if (n_args == 1)
6160 fntype = build_function_type_list (type_out, type_in, NULL);
6161 else if (n_args == 2)
6162 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
6163 else
6164 gcc_unreachable ();
6166 /* Build a function declaration for the vectorized function. */
6167 new_fndecl = build_decl (BUILTINS_LOCATION,
6168 FUNCTION_DECL, get_identifier (name), fntype);
6169 TREE_PUBLIC (new_fndecl) = 1;
6170 DECL_EXTERNAL (new_fndecl) = 1;
6171 DECL_IS_NOVOPS (new_fndecl) = 1;
6172 TREE_READONLY (new_fndecl) = 1;
6174 return new_fndecl;
6177 /* Returns a function decl for a vectorized version of the builtin function
6178 with builtin function code FN and the result vector type TYPE, or NULL_TREE
6179 if it is not available. */
6181 static tree
6182 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
6183 tree type_in)
6185 machine_mode in_mode, out_mode;
6186 int in_n, out_n;
6188 if (TARGET_DEBUG_BUILTIN)
6189 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
6190 combined_fn_name (combined_fn (fn)),
6191 GET_MODE_NAME (TYPE_MODE (type_out)),
6192 GET_MODE_NAME (TYPE_MODE (type_in)));
6194 if (TREE_CODE (type_out) != VECTOR_TYPE
6195 || TREE_CODE (type_in) != VECTOR_TYPE
6196 || !TARGET_VECTORIZE_BUILTINS)
6197 return NULL_TREE;
6199 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6200 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6201 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6202 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6204 switch (fn)
6206 CASE_CFN_COPYSIGN:
6207 if (VECTOR_UNIT_VSX_P (V2DFmode)
6208 && out_mode == DFmode && out_n == 2
6209 && in_mode == DFmode && in_n == 2)
6210 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
6211 if (VECTOR_UNIT_VSX_P (V4SFmode)
6212 && out_mode == SFmode && out_n == 4
6213 && in_mode == SFmode && in_n == 4)
6214 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
6215 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6216 && out_mode == SFmode && out_n == 4
6217 && in_mode == SFmode && in_n == 4)
6218 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
6219 break;
6220 CASE_CFN_CEIL:
6221 if (VECTOR_UNIT_VSX_P (V2DFmode)
6222 && out_mode == DFmode && out_n == 2
6223 && in_mode == DFmode && in_n == 2)
6224 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
6225 if (VECTOR_UNIT_VSX_P (V4SFmode)
6226 && out_mode == SFmode && out_n == 4
6227 && in_mode == SFmode && in_n == 4)
6228 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
6229 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6230 && out_mode == SFmode && out_n == 4
6231 && in_mode == SFmode && in_n == 4)
6232 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
6233 break;
6234 CASE_CFN_FLOOR:
6235 if (VECTOR_UNIT_VSX_P (V2DFmode)
6236 && out_mode == DFmode && out_n == 2
6237 && in_mode == DFmode && in_n == 2)
6238 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
6239 if (VECTOR_UNIT_VSX_P (V4SFmode)
6240 && out_mode == SFmode && out_n == 4
6241 && in_mode == SFmode && in_n == 4)
6242 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
6243 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6244 && out_mode == SFmode && out_n == 4
6245 && in_mode == SFmode && in_n == 4)
6246 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
6247 break;
6248 CASE_CFN_FMA:
6249 if (VECTOR_UNIT_VSX_P (V2DFmode)
6250 && out_mode == DFmode && out_n == 2
6251 && in_mode == DFmode && in_n == 2)
6252 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
6253 if (VECTOR_UNIT_VSX_P (V4SFmode)
6254 && out_mode == SFmode && out_n == 4
6255 && in_mode == SFmode && in_n == 4)
6256 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
6257 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6258 && out_mode == SFmode && out_n == 4
6259 && in_mode == SFmode && in_n == 4)
6260 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
6261 break;
6262 CASE_CFN_TRUNC:
6263 if (VECTOR_UNIT_VSX_P (V2DFmode)
6264 && out_mode == DFmode && out_n == 2
6265 && in_mode == DFmode && in_n == 2)
6266 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
6267 if (VECTOR_UNIT_VSX_P (V4SFmode)
6268 && out_mode == SFmode && out_n == 4
6269 && in_mode == SFmode && in_n == 4)
6270 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
6271 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
6272 && out_mode == SFmode && out_n == 4
6273 && in_mode == SFmode && in_n == 4)
6274 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
6275 break;
6276 CASE_CFN_NEARBYINT:
6277 if (VECTOR_UNIT_VSX_P (V2DFmode)
6278 && flag_unsafe_math_optimizations
6279 && out_mode == DFmode && out_n == 2
6280 && in_mode == DFmode && in_n == 2)
6281 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
6282 if (VECTOR_UNIT_VSX_P (V4SFmode)
6283 && flag_unsafe_math_optimizations
6284 && out_mode == SFmode && out_n == 4
6285 && in_mode == SFmode && in_n == 4)
6286 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
6287 break;
6288 CASE_CFN_RINT:
6289 if (VECTOR_UNIT_VSX_P (V2DFmode)
6290 && !flag_trapping_math
6291 && out_mode == DFmode && out_n == 2
6292 && in_mode == DFmode && in_n == 2)
6293 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
6294 if (VECTOR_UNIT_VSX_P (V4SFmode)
6295 && !flag_trapping_math
6296 && out_mode == SFmode && out_n == 4
6297 && in_mode == SFmode && in_n == 4)
6298 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
6299 break;
6300 default:
6301 break;
6304 /* Generate calls to libmass if appropriate. */
6305 if (rs6000_veclib_handler)
6306 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6308 return NULL_TREE;
6311 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6313 static tree
6314 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6315 tree type_in)
6317 machine_mode in_mode, out_mode;
6318 int in_n, out_n;
6320 if (TARGET_DEBUG_BUILTIN)
6321 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6322 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6323 GET_MODE_NAME (TYPE_MODE (type_out)),
6324 GET_MODE_NAME (TYPE_MODE (type_in)));
6326 if (TREE_CODE (type_out) != VECTOR_TYPE
6327 || TREE_CODE (type_in) != VECTOR_TYPE
6328 || !TARGET_VECTORIZE_BUILTINS)
6329 return NULL_TREE;
6331 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6332 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6333 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6334 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6336 enum rs6000_builtins fn
6337 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6338 switch (fn)
6340 case RS6000_BUILTIN_RSQRTF:
6341 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6342 && out_mode == SFmode && out_n == 4
6343 && in_mode == SFmode && in_n == 4)
6344 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6345 break;
6346 case RS6000_BUILTIN_RSQRT:
6347 if (VECTOR_UNIT_VSX_P (V2DFmode)
6348 && out_mode == DFmode && out_n == 2
6349 && in_mode == DFmode && in_n == 2)
6350 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6351 break;
6352 case RS6000_BUILTIN_RECIPF:
6353 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6354 && out_mode == SFmode && out_n == 4
6355 && in_mode == SFmode && in_n == 4)
6356 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6357 break;
6358 case RS6000_BUILTIN_RECIP:
6359 if (VECTOR_UNIT_VSX_P (V2DFmode)
6360 && out_mode == DFmode && out_n == 2
6361 && in_mode == DFmode && in_n == 2)
6362 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6363 break;
6364 default:
6365 break;
6367 return NULL_TREE;
6370 /* Default CPU string for rs6000*_file_start functions. */
6371 static const char *rs6000_default_cpu;
6373 /* Do anything needed at the start of the asm file. */
6375 static void
6376 rs6000_file_start (void)
6378 char buffer[80];
6379 const char *start = buffer;
6380 FILE *file = asm_out_file;
6382 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6384 default_file_start ();
6386 if (flag_verbose_asm)
6388 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6390 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6392 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6393 start = "";
6396 if (global_options_set.x_rs6000_cpu_index)
6398 fprintf (file, "%s -mcpu=%s", start,
6399 processor_target_table[rs6000_cpu_index].name);
6400 start = "";
6403 if (global_options_set.x_rs6000_tune_index)
6405 fprintf (file, "%s -mtune=%s", start,
6406 processor_target_table[rs6000_tune_index].name);
6407 start = "";
6410 if (PPC405_ERRATUM77)
6412 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6413 start = "";
6416 #ifdef USING_ELFOS_H
6417 switch (rs6000_sdata)
6419 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6420 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6421 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6422 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6425 if (rs6000_sdata && g_switch_value)
6427 fprintf (file, "%s -G %d", start,
6428 g_switch_value);
6429 start = "";
6431 #endif
6433 if (*start == '\0')
6434 putc ('\n', file);
6437 #ifdef USING_ELFOS_H
6438 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6439 && !global_options_set.x_rs6000_cpu_index)
6441 fputs ("\t.machine ", asm_out_file);
6442 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6443 fputs ("power9\n", asm_out_file);
6444 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6445 fputs ("power8\n", asm_out_file);
6446 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6447 fputs ("power7\n", asm_out_file);
6448 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6449 fputs ("power6\n", asm_out_file);
6450 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6451 fputs ("power5\n", asm_out_file);
6452 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6453 fputs ("power4\n", asm_out_file);
6454 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6455 fputs ("ppc64\n", asm_out_file);
6456 else
6457 fputs ("ppc\n", asm_out_file);
6459 #endif
6461 if (DEFAULT_ABI == ABI_ELFv2)
6462 fprintf (file, "\t.abiversion 2\n");
6466 /* Return nonzero if this function is known to have a null epilogue. */
6468 int
6469 direct_return (void)
6471 if (reload_completed)
6473 rs6000_stack_t *info = rs6000_stack_info ();
6475 if (info->first_gp_reg_save == 32
6476 && info->first_fp_reg_save == 64
6477 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6478 && ! info->lr_save_p
6479 && ! info->cr_save_p
6480 && info->vrsave_size == 0
6481 && ! info->push_p)
6482 return 1;
6485 return 0;
6488 /* Return the number of instructions it takes to form a constant in an
6489 integer register. */
6491 int
6492 num_insns_constant_wide (HOST_WIDE_INT value)
6494 /* signed constant loadable with addi */
6495 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6496 return 1;
6498 /* constant loadable with addis */
6499 else if ((value & 0xffff) == 0
6500 && (value >> 31 == -1 || value >> 31 == 0))
6501 return 1;
6503 else if (TARGET_POWERPC64)
6505 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6506 HOST_WIDE_INT high = value >> 31;
6508 if (high == 0 || high == -1)
6509 return 2;
6511 high >>= 1;
6513 if (low == 0)
6514 return num_insns_constant_wide (high) + 1;
6515 else if (high == 0)
6516 return num_insns_constant_wide (low) + 1;
6517 else
6518 return (num_insns_constant_wide (high)
6519 + num_insns_constant_wide (low) + 1);
6522 else
6523 return 2;
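/* A host-side sketch of the classification above, assuming a 64-bit
   target.  E.g. 0x7fff -> 1 (addi), 0x12340000 -> 1 (addis),
   0x12345678 -> 2 (addis + ori), and a full 64-bit constant such as
   0x1234567890abcdef -> 5 (two 32-bit halves plus a combining insn).  */
#if 0
#include <stdint.h>

static int
sketch_insns_for_constant (int64_t value)
{
  if ((uint64_t) value + 0x8000 < 0x10000)
    return 1;			/* fits addi's signed 16-bit immediate */
  if ((value & 0xffff) == 0 && (value >> 31 == 0 || value >> 31 == -1))
    return 1;			/* fits addis */
  int64_t low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
  int64_t high = value >> 31;
  if (high == 0 || high == -1)
    return 2;			/* addis + addi/ori pair */
  high >>= 1;
  if (low == 0)
    return sketch_insns_for_constant (high) + 1;
  if (high == 0)
    return sketch_insns_for_constant (low) + 1;
  return (sketch_insns_for_constant (high)
	  + sketch_insns_for_constant (low) + 1);
}
#endif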
6526 int
6527 num_insns_constant (rtx op, machine_mode mode)
6529 HOST_WIDE_INT low, high;
6531 switch (GET_CODE (op))
6533 case CONST_INT:
6534 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6535 && rs6000_is_valid_and_mask (op, mode))
6536 return 2;
6537 else
6538 return num_insns_constant_wide (INTVAL (op));
6540 case CONST_WIDE_INT:
6542 int i;
6543 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6544 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6545 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6546 return ins;
6549 case CONST_DOUBLE:
6550 if (mode == SFmode || mode == SDmode)
6552 long l;
6554 if (DECIMAL_FLOAT_MODE_P (mode))
6555 REAL_VALUE_TO_TARGET_DECIMAL32
6556 (*CONST_DOUBLE_REAL_VALUE (op), l);
6557 else
6558 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6559 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6562 long l[2];
6563 if (DECIMAL_FLOAT_MODE_P (mode))
6564 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6565 else
6566 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6567 high = l[WORDS_BIG_ENDIAN == 0];
6568 low = l[WORDS_BIG_ENDIAN != 0];
6570 if (TARGET_32BIT)
6571 return (num_insns_constant_wide (low)
6572 + num_insns_constant_wide (high));
6573 else
6575 if ((high == 0 && low >= 0)
6576 || (high == -1 && low < 0))
6577 return num_insns_constant_wide (low);
6579 else if (rs6000_is_valid_and_mask (op, mode))
6580 return 2;
6582 else if (low == 0)
6583 return num_insns_constant_wide (high) + 1;
6585 else
6586 return (num_insns_constant_wide (high)
6587 + num_insns_constant_wide (low) + 1);
6590 default:
6591 gcc_unreachable ();
6595 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6596 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6597 corresponding element of the vector, but for V4SFmode and V2SFmode,
6598 the corresponding "float" is interpreted as an SImode integer. */
6600 HOST_WIDE_INT
6601 const_vector_elt_as_int (rtx op, unsigned int elt)
6603 rtx tmp;
6605 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6606 gcc_assert (GET_MODE (op) != V2DImode
6607 && GET_MODE (op) != V2DFmode);
6609 tmp = CONST_VECTOR_ELT (op, elt);
6610 if (GET_MODE (op) == V4SFmode
6611 || GET_MODE (op) == V2SFmode)
6612 tmp = gen_lowpart (SImode, tmp);
6613 return INTVAL (tmp);
6616 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6617 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6618 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6619 all items are set to the same value and contain COPIES replicas of the
6620 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6621 operand and the others are set to the value of the operand's msb. */
6623 static bool
6624 vspltis_constant (rtx op, unsigned step, unsigned copies)
6626 machine_mode mode = GET_MODE (op);
6627 machine_mode inner = GET_MODE_INNER (mode);
6629 unsigned i;
6630 unsigned nunits;
6631 unsigned bitsize;
6632 unsigned mask;
6634 HOST_WIDE_INT val;
6635 HOST_WIDE_INT splat_val;
6636 HOST_WIDE_INT msb_val;
6638 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6639 return false;
6641 nunits = GET_MODE_NUNITS (mode);
6642 bitsize = GET_MODE_BITSIZE (inner);
6643 mask = GET_MODE_MASK (inner);
6645 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6646 splat_val = val;
6647 msb_val = val >= 0 ? 0 : -1;
6649 /* Construct the value to be splatted, if possible. If not, return 0. */
6650 for (i = 2; i <= copies; i *= 2)
6652 HOST_WIDE_INT small_val;
6653 bitsize /= 2;
6654 small_val = splat_val >> bitsize;
6655 mask >>= bitsize;
6656 if (splat_val != ((HOST_WIDE_INT)
6657 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6658 | (small_val & mask)))
6659 return false;
6660 splat_val = small_val;
6663 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6664 if (EASY_VECTOR_15 (splat_val))
6667 /* Also check if we can splat, and then add the result to itself. Do so if
6668 the value is positive, or if the splat instruction is using OP's mode;
6669 for splat_val < 0, the splat and the add should use the same mode. */
6670 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6671 && (splat_val >= 0 || (step == 1 && copies == 1)))
6674 /* Also check if we are loading up the most significant bit, which can be
6675 done by loading up -1 and shifting the value left by -1. */
6676 else if (EASY_VECTOR_MSB (splat_val, inner))
6679 else
6680 return false;
6682 /* Check if VAL is present in every STEP-th element, and the
6683 other elements are filled with its most significant bit. */
6684 for (i = 1; i < nunits; ++i)
6686 HOST_WIDE_INT desired_val;
6687 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6688 if ((i & (step - 1)) == 0)
6689 desired_val = val;
6690 else
6691 desired_val = msb_val;
6693 if (desired_val != const_vector_elt_as_int (op, elt))
6694 return false;
6697 return true;
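/* Worked example for the COPIES halving above: a V4SImode vector of
   0x00050005 tried with step = 1, copies = 2 halves bitsize 32 -> 16,
   finds small_val = 5, verifies 0x00050005 == (5 << 16) | 5, and since 5
   fits the 5-bit vspltis immediate the constant is a single vspltish.  A
   standalone sketch of that check for one SImode element:  */
#if 0
#include <stdint.h>

static int
sketch_vspltish_splats_si (int32_t elt)
{
  int16_t half = (int16_t) (elt >> 16);
  if (elt != (int32_t) (((uint32_t) (uint16_t) half << 16)
			| (uint16_t) half))
    return 0;			/* the two halfwords differ */
  return half >= -16 && half <= 15;	/* EASY_VECTOR_15 range */
}
#endif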
6700 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6701 instruction, filling in the bottom elements with 0 or -1.
6703 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6704 for the number of zeroes to shift in, or negative for the number of 0xff
6705 bytes to shift in.
6707 OP is a CONST_VECTOR. */
6709 int
6710 vspltis_shifted (rtx op)
6712 machine_mode mode = GET_MODE (op);
6713 machine_mode inner = GET_MODE_INNER (mode);
6715 unsigned i, j;
6716 unsigned nunits;
6717 unsigned mask;
6719 HOST_WIDE_INT val;
6721 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6722 return 0;
6724 /* We need to create pseudo registers to do the shift, so don't recognize
6725 shift vector constants after reload. */
6726 if (!can_create_pseudo_p ())
6727 return 0;
6729 nunits = GET_MODE_NUNITS (mode);
6730 mask = GET_MODE_MASK (inner);
6732 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6734 /* Check if the value can really be the operand of a vspltis[bhw]. */
6735 if (EASY_VECTOR_15 (val))
6738 /* Also check if we are loading up the most significant bit which can be done
6739 by loading up -1 and shifting the value left by -1. */
6740 else if (EASY_VECTOR_MSB (val, inner))
6743 else
6744 return 0;
6746 /* Check if VAL is present in every STEP-th element until we find elements
6747 that are 0 or all 1 bits. */
6748 for (i = 1; i < nunits; ++i)
6750 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6751 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6753 /* If the value isn't the splat value, check for the remaining elements
6754 being 0/-1. */
6755 if (val != elt_val)
6757 if (elt_val == 0)
6759 for (j = i+1; j < nunits; ++j)
6761 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6762 if (const_vector_elt_as_int (op, elt2) != 0)
6763 return 0;
6766 return (nunits - i) * GET_MODE_SIZE (inner);
6769 else if ((elt_val & mask) == mask)
6771 for (j = i+1; j < nunits; ++j)
6773 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6774 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6775 return 0;
6778 return -((nunits - i) * GET_MODE_SIZE (inner));
6781 else
6782 return 0;
6786 /* If all elements are equal, we don't need to do VSLDOI. */
6787 return 0;
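/* Worked example for the routine above: a big-endian V4SImode constant
   { 5, 5, 5, 0 } hits the zero element at i = 3 and yields
   (4 - 3) * 4 = 4, i.e. splat 5 with vspltisw and shift four zero bytes in
   with vsldoi.  A standalone sketch of just the zero-fill scan (the
   EASY_VECTOR_15 check on the splat value is omitted here):  */
#if 0
#include <stdint.h>

static int
sketch_shifted_zero_bytes (const int32_t *elts, int nelts)
{
  int i;
  for (i = 1; i < nelts; i++)
    if (elts[i] != elts[0])
      break;
  for (int j = i; j < nelts; j++)
    if (elts[j] != 0)
      return 0;			/* trailing part is not all zeros */
  return (nelts - i) * 4;	/* zero bytes to shift in with vsldoi */
}
#endif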
6791 /* Return true if OP is of the given MODE and can be synthesized
6792 with a vspltisb, vspltish or vspltisw. */
6794 bool
6795 easy_altivec_constant (rtx op, machine_mode mode)
6797 unsigned step, copies;
6799 if (mode == VOIDmode)
6800 mode = GET_MODE (op);
6801 else if (mode != GET_MODE (op))
6802 return false;
6804 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6805 constants. */
6806 if (mode == V2DFmode)
6807 return zero_constant (op, mode);
6809 else if (mode == V2DImode)
6811 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6812 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6813 return false;
6815 if (zero_constant (op, mode))
6816 return true;
6818 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6819 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6820 return true;
6822 return false;
6825 /* V1TImode is a special container for TImode. Ignore for now. */
6826 else if (mode == V1TImode)
6827 return false;
6829 /* Start with a vspltisw. */
6830 step = GET_MODE_NUNITS (mode) / 4;
6831 copies = 1;
6833 if (vspltis_constant (op, step, copies))
6834 return true;
6836 /* Then try with a vspltish. */
6837 if (step == 1)
6838 copies <<= 1;
6839 else
6840 step >>= 1;
6842 if (vspltis_constant (op, step, copies))
6843 return true;
6845 /* And finally a vspltisb. */
6846 if (step == 1)
6847 copies <<= 1;
6848 else
6849 step >>= 1;
6851 if (vspltis_constant (op, step, copies))
6852 return true;
6854 if (vspltis_shifted (op) != 0)
6855 return true;
6857 return false;
6860 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6861 result is OP. Abort if it is not possible. */
6863 rtx
6864 gen_easy_altivec_constant (rtx op)
6866 machine_mode mode = GET_MODE (op);
6867 int nunits = GET_MODE_NUNITS (mode);
6868 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6869 unsigned step = nunits / 4;
6870 unsigned copies = 1;
6872 /* Start with a vspltisw. */
6873 if (vspltis_constant (op, step, copies))
6874 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6876 /* Then try with a vspltish. */
6877 if (step == 1)
6878 copies <<= 1;
6879 else
6880 step >>= 1;
6882 if (vspltis_constant (op, step, copies))
6883 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6885 /* And finally a vspltisb. */
6886 if (step == 1)
6887 copies <<= 1;
6888 else
6889 step >>= 1;
6891 if (vspltis_constant (op, step, copies))
6892 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6894 gcc_unreachable ();
6897 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6898 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6900 Return the number of instructions needed (1 or 2) via the address pointed
6901 to by NUM_INSNS_PTR.
6903 Return the constant that is being split via CONSTANT_PTR. */
6905 bool
6906 xxspltib_constant_p (rtx op,
6907 machine_mode mode,
6908 int *num_insns_ptr,
6909 int *constant_ptr)
6911 size_t nunits = GET_MODE_NUNITS (mode);
6912 size_t i;
6913 HOST_WIDE_INT value;
6914 rtx element;
6916 /* Set the returned values to out of bound values. */
6917 *num_insns_ptr = -1;
6918 *constant_ptr = 256;
6920 if (!TARGET_P9_VECTOR)
6921 return false;
6923 if (mode == VOIDmode)
6924 mode = GET_MODE (op);
6926 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6927 return false;
6929 /* Handle (vec_duplicate <constant>). */
6930 if (GET_CODE (op) == VEC_DUPLICATE)
6932 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6933 && mode != V2DImode)
6934 return false;
6936 element = XEXP (op, 0);
6937 if (!CONST_INT_P (element))
6938 return false;
6940 value = INTVAL (element);
6941 if (!IN_RANGE (value, -128, 127))
6942 return false;
6945 /* Handle (const_vector [...]). */
6946 else if (GET_CODE (op) == CONST_VECTOR)
6948 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6949 && mode != V2DImode)
6950 return false;
6952 element = CONST_VECTOR_ELT (op, 0);
6953 if (!CONST_INT_P (element))
6954 return false;
6956 value = INTVAL (element);
6957 if (!IN_RANGE (value, -128, 127))
6958 return false;
6960 for (i = 1; i < nunits; i++)
6962 element = CONST_VECTOR_ELT (op, i);
6963 if (!CONST_INT_P (element))
6964 return false;
6966 if (value != INTVAL (element))
6967 return false;
6971 /* Handle integer constants being loaded into the upper part of the VSX
6972 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6973 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6974 else if (CONST_INT_P (op))
6976 if (!SCALAR_INT_MODE_P (mode))
6977 return false;
6979 value = INTVAL (op);
6980 if (!IN_RANGE (value, -128, 127))
6981 return false;
6983 if (!IN_RANGE (value, -1, 0))
6985 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6986 return false;
6988 if (EASY_VECTOR_15 (value))
6989 return false;
6993 else
6994 return false;
6996 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6997 sign extend. Special case 0/-1 to allow getting any VSX register instead
6998 of an Altivec register. */
6999 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
7000 && EASY_VECTOR_15 (value))
7001 return false;
7003 /* Return # of instructions and the constant byte for XXSPLTIB. */
7004 if (mode == V16QImode)
7005 *num_insns_ptr = 1;
7007 else if (IN_RANGE (value, -1, 0))
7008 *num_insns_ptr = 1;
7010 else
7011 *num_insns_ptr = 2;
7013 *constant_ptr = (int) value;
7014 return true;
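/* Illustrative sketch, not part of the original file: a standalone mirror
   (hypothetical helper) of the instruction-count decision encoded above.
   An xxspltib alone suffices for V16QImode or for the all-zeros/all-ones
   patterns; other modes need a following sign extend (vupkhsb, vextsb2w,
   or vextsb2d).  */
#if 0
static int
xxspltib_insn_count (int byte_value, int is_v16qi)
{
  if (is_v16qi)
    return 1;                                    /* xxspltib only.  */
  return (byte_value == 0 || byte_value == -1)   /* splat is sign-neutral */
         ? 1 : 2;                                /* else add an extend.  */
}
#endif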
7017 const char *
7018 output_vec_const_move (rtx *operands)
7020 int cst, cst2, shift;
7021 machine_mode mode;
7022 rtx dest, vec;
7024 dest = operands[0];
7025 vec = operands[1];
7026 mode = GET_MODE (dest);
7028 if (TARGET_VSX)
7030 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
7031 int xxspltib_value = 256;
7032 int num_insns = -1;
7034 if (zero_constant (vec, mode))
7036 if (TARGET_P9_VECTOR)
7037 return "xxspltib %x0,0";
7039 else if (dest_vmx_p)
7040 return "vspltisw %0,0";
7042 else
7043 return "xxlxor %x0,%x0,%x0";
7046 if (all_ones_constant (vec, mode))
7048 if (TARGET_P9_VECTOR)
7049 return "xxspltib %x0,255";
7051 else if (dest_vmx_p)
7052 return "vspltisw %0,-1";
7054 else if (TARGET_P8_VECTOR)
7055 return "xxlorc %x0,%x0,%x0";
7057 else
7058 gcc_unreachable ();
7061 if (TARGET_P9_VECTOR
7062 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
7064 if (num_insns == 1)
7066 operands[2] = GEN_INT (xxspltib_value & 0xff);
7067 return "xxspltib %x0,%2";
7070 return "#";
7074 if (TARGET_ALTIVEC)
7076 rtx splat_vec;
7078 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
7079 if (zero_constant (vec, mode))
7080 return "vspltisw %0,0";
7082 if (all_ones_constant (vec, mode))
7083 return "vspltisw %0,-1";
7085 /* Do we need to construct a value using VSLDOI? */
7086 shift = vspltis_shifted (vec);
7087 if (shift != 0)
7088 return "#";
7090 splat_vec = gen_easy_altivec_constant (vec);
7091 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
7092 operands[1] = XEXP (splat_vec, 0);
7093 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
7094 return "#";
7096 switch (GET_MODE (splat_vec))
7098 case V4SImode:
7099 return "vspltisw %0,%1";
7101 case V8HImode:
7102 return "vspltish %0,%1";
7104 case V16QImode:
7105 return "vspltisb %0,%1";
7107 default:
7108 gcc_unreachable ();
7112 gcc_assert (TARGET_SPE);
7114 /* Vector constant 0 is handled as a splitter of V2SI, and in the
7115 pattern of V1DI, V4HI, and V2SF.
7117 FIXME: We should probably return # and add post reload
7118 splitters for these, but this way is so easy ;-). */
7119 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
7120 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
7121 operands[1] = CONST_VECTOR_ELT (vec, 0);
7122 operands[2] = CONST_VECTOR_ELT (vec, 1);
7123 if (cst == cst2)
7124 return "li %0,%1\n\tevmergelo %0,%0,%0";
7125 else if (WORDS_BIG_ENDIAN)
7126 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
7127 else
7128 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
7131 /* Initialize paired-float vector TARGET to VALS. */
7133 void
7134 paired_expand_vector_init (rtx target, rtx vals)
7136 machine_mode mode = GET_MODE (target);
7137 int n_elts = GET_MODE_NUNITS (mode);
7138 int n_var = 0;
7139 rtx x, new_rtx, tmp, constant_op, op1, op2;
7140 int i;
7142 for (i = 0; i < n_elts; ++i)
7144 x = XVECEXP (vals, 0, i);
7145 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7146 ++n_var;
7148 if (n_var == 0)
7150 /* Load from constant pool. */
7151 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
7152 return;
7155 if (n_var == 2)
7157 /* The vector is initialized only with non-constants. */
7158 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
7159 XVECEXP (vals, 0, 1));
7161 emit_move_insn (target, new_rtx);
7162 return;
7165 /* One field is non-constant and the other one is a constant. Load the
7166 constant from the constant pool and use the ps_merge instruction to
7167 construct the whole vector. */
7168 op1 = XVECEXP (vals, 0, 0);
7169 op2 = XVECEXP (vals, 0, 1);
7171 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
7173 tmp = gen_reg_rtx (GET_MODE (constant_op));
7174 emit_move_insn (tmp, constant_op);
7176 if (CONSTANT_P (op1))
7177 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
7178 else
7179 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
7181 emit_move_insn (target, new_rtx);
7184 void
7185 paired_expand_vector_move (rtx operands[])
7187 rtx op0 = operands[0], op1 = operands[1];
7189 emit_move_insn (op0, op1);
7192 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
7193 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
7194 operands for the relation operation COND. This is a recursive
7195 function. */
7197 static void
7198 paired_emit_vector_compare (enum rtx_code rcode,
7199 rtx dest, rtx op0, rtx op1,
7200 rtx cc_op0, rtx cc_op1)
7202 rtx tmp = gen_reg_rtx (V2SFmode);
7203 rtx tmp1, max, min;
7205 gcc_assert (TARGET_PAIRED_FLOAT);
7206 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
7208 switch (rcode)
7210 case LT:
7211 case LTU:
7212 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
7213 return;
7214 case GE:
7215 case GEU:
7216 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
7217 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
7218 return;
7219 case LE:
7220 case LEU:
7221 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
7222 return;
7223 case GT:
7224 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
7225 return;
7226 case EQ:
7227 tmp1 = gen_reg_rtx (V2SFmode);
7228 max = gen_reg_rtx (V2SFmode);
7229 min = gen_reg_rtx (V2SFmode);
7232 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
7233 emit_insn (gen_selv2sf4
7234 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
7235 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
7236 emit_insn (gen_selv2sf4
7237 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
7238 emit_insn (gen_subv2sf3 (tmp1, min, max));
7239 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
7240 return;
7241 case NE:
7242 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
7243 return;
7244 case UNLE:
7245 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
7246 return;
7247 case UNLT:
7248 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
7249 return;
7250 case UNGE:
7251 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
7252 return;
7253 case UNGT:
7254 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
7255 return;
7256 default:
7257 gcc_unreachable ();
7260 return;
7263 /* Emit vector conditional expression.
7264 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
7265 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
7267 int
7268 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
7269 rtx cond, rtx cc_op0, rtx cc_op1)
7271 enum rtx_code rcode = GET_CODE (cond);
7273 if (!TARGET_PAIRED_FLOAT)
7274 return 0;
7276 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
7278 return 1;
7281 /* Initialize vector TARGET to VALS. */
7283 void
7284 rs6000_expand_vector_init (rtx target, rtx vals)
7286 machine_mode mode = GET_MODE (target);
7287 machine_mode inner_mode = GET_MODE_INNER (mode);
7288 int n_elts = GET_MODE_NUNITS (mode);
7289 int n_var = 0, one_var = -1;
7290 bool all_same = true, all_const_zero = true;
7291 rtx x, mem;
7292 int i;
7294 for (i = 0; i < n_elts; ++i)
7296 x = XVECEXP (vals, 0, i);
7297 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
7298 ++n_var, one_var = i;
7299 else if (x != CONST0_RTX (inner_mode))
7300 all_const_zero = false;
7302 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
7303 all_same = false;
7306 if (n_var == 0)
7308 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
7309 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
7310 if ((int_vector_p || TARGET_VSX) && all_const_zero)
7312 /* Zero register. */
7313 emit_move_insn (target, CONST0_RTX (mode));
7314 return;
7316 else if (int_vector_p && easy_vector_constant (const_vec, mode))
7318 /* Splat immediate. */
7319 emit_insn (gen_rtx_SET (target, const_vec));
7320 return;
7322 else
7324 /* Load from constant pool. */
7325 emit_move_insn (target, const_vec);
7326 return;
7330 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7331 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7333 rtx op[2];
7334 size_t i;
7335 size_t num_elements = all_same ? 1 : 2;
7336 for (i = 0; i < num_elements; i++)
7338 op[i] = XVECEXP (vals, 0, i);
7339 /* Just in case there is a SUBREG with a smaller mode, do a
7340 conversion. */
7341 if (GET_MODE (op[i]) != inner_mode)
7343 rtx tmp = gen_reg_rtx (inner_mode);
7344 convert_move (tmp, op[i], 0);
7345 op[i] = tmp;
7347 /* Allow load with splat double word. */
7348 else if (MEM_P (op[i]))
7350 if (!all_same)
7351 op[i] = force_reg (inner_mode, op[i]);
7353 else if (!REG_P (op[i]))
7354 op[i] = force_reg (inner_mode, op[i]);
7357 if (all_same)
7359 if (mode == V2DFmode)
7360 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7361 else
7362 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7364 else
7366 if (mode == V2DFmode)
7367 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7368 else
7369 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7371 return;
7374 /* Special case initializing vector int if we are on 64-bit systems with
7375 direct move or we have the ISA 3.0 instructions. */
7376 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7377 && TARGET_DIRECT_MOVE_64BIT)
7379 if (all_same)
7381 rtx element0 = XVECEXP (vals, 0, 0);
7382 if (MEM_P (element0))
7383 element0 = rs6000_address_for_fpconvert (element0);
7384 else
7385 element0 = force_reg (SImode, element0);
7387 if (TARGET_P9_VECTOR)
7388 emit_insn (gen_vsx_splat_v4si (target, element0));
7389 else
7391 rtx tmp = gen_reg_rtx (DImode);
7392 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7393 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7395 return;
7397 else
7399 rtx elements[4];
7400 size_t i;
7402 for (i = 0; i < 4; i++)
7404 elements[i] = XVECEXP (vals, 0, i);
7405 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7406 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7409 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7410 elements[2], elements[3]));
7411 return;
7415 /* With single-precision floating point on VSX, note that internally single
7416 precision is actually represented as a double. Either make 2 V2DF
7417 vectors and convert them to single precision, or do one conversion
7418 and splat the result to the other elements. */
7419 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7421 if (all_same)
7423 rtx element0 = XVECEXP (vals, 0, 0);
7425 if (TARGET_P9_VECTOR)
7427 if (MEM_P (element0))
7428 element0 = rs6000_address_for_fpconvert (element0);
7430 emit_insn (gen_vsx_splat_v4sf (target, element0));
7433 else
7435 rtx freg = gen_reg_rtx (V4SFmode);
7436 rtx sreg = force_reg (SFmode, element0);
7437 rtx cvt = (TARGET_XSCVDPSPN
7438 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7439 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7441 emit_insn (cvt);
7442 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7443 const0_rtx));
7446 else
7448 rtx dbl_even = gen_reg_rtx (V2DFmode);
7449 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7450 rtx flt_even = gen_reg_rtx (V4SFmode);
7451 rtx flt_odd = gen_reg_rtx (V4SFmode);
7452 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7453 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7454 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7455 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7457 /* Use VMRGEW if we can instead of doing a permute. */
7458 if (TARGET_P8_VECTOR)
7460 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7461 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7462 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7463 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7464 if (BYTES_BIG_ENDIAN)
7465 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7466 else
7467 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7469 else
7471 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7472 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7473 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7474 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7475 rs6000_expand_extract_even (target, flt_even, flt_odd);
7478 return;
7481 /* Special case initializing vector short/char that are splats if we are on
7482 64-bit systems with direct move. */
7483 if (all_same && TARGET_DIRECT_MOVE_64BIT
7484 && (mode == V16QImode || mode == V8HImode))
7486 rtx op0 = XVECEXP (vals, 0, 0);
7487 rtx di_tmp = gen_reg_rtx (DImode);
7489 if (!REG_P (op0))
7490 op0 = force_reg (GET_MODE_INNER (mode), op0);
7492 if (mode == V16QImode)
7494 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7495 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7496 return;
7499 if (mode == V8HImode)
7501 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7502 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7503 return;
7507 /* Store value to stack temp. Load vector element. Splat. However, splat
7508 of 64-bit items is not supported on Altivec. */
7509 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7511 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7512 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7513 XVECEXP (vals, 0, 0));
7514 x = gen_rtx_UNSPEC (VOIDmode,
7515 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7516 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7517 gen_rtvec (2,
7518 gen_rtx_SET (target, mem),
7519 x)));
7520 x = gen_rtx_VEC_SELECT (inner_mode, target,
7521 gen_rtx_PARALLEL (VOIDmode,
7522 gen_rtvec (1, const0_rtx)));
7523 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7524 return;
7527 /* One field is non-constant. Load constant then overwrite
7528 varying field. */
7529 if (n_var == 1)
7531 rtx copy = copy_rtx (vals);
7533 /* Load constant part of vector, substitute neighboring value for
7534 varying element. */
7535 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7536 rs6000_expand_vector_init (target, copy);
7538 /* Insert variable. */
7539 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7540 return;
7543 /* Construct the vector in memory one field at a time
7544 and load the whole vector. */
7545 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7546 for (i = 0; i < n_elts; i++)
7547 emit_move_insn (adjust_address_nv (mem, inner_mode,
7548 i * GET_MODE_SIZE (inner_mode)),
7549 XVECEXP (vals, 0, i));
7550 emit_move_insn (target, mem);
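/* Illustrative summary, not part of the original file: the expander above
   tries its strategies in a fixed order.  A sketch of that order as code
   (hypothetical enum, for exposition only): */
#if 0
enum vec_init_strategy
{
  VI_CONST_POOL_OR_SPLAT,  /* all elements constant */
  VI_VSX_CONCAT_OR_SPLAT,  /* V2DF/V2DI on VSX */
  VI_DIRECT_MOVE_V4SI,     /* V4SI with 64-bit direct move */
  VI_V4SF_VIA_V2DF,        /* convert through double precision */
  VI_DIRECT_MOVE_SPLAT,    /* splatted V16QI/V8HI */
  VI_STACK_TEMP_SPLAT,     /* small elements, all the same */
  VI_FIX_ONE_VARIABLE,     /* n_var == 1: init consts, then set */
  VI_BUILD_IN_MEMORY       /* general fallback */
};
#endif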
7553 /* Set field ELT of TARGET to VAL. */
7555 void
7556 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7558 machine_mode mode = GET_MODE (target);
7559 machine_mode inner_mode = GET_MODE_INNER (mode);
7560 rtx reg = gen_reg_rtx (mode);
7561 rtx mask, mem, x;
7562 int width = GET_MODE_SIZE (inner_mode);
7563 int i;
7565 val = force_reg (GET_MODE (val), val);
7567 if (VECTOR_MEM_VSX_P (mode))
7569 rtx insn = NULL_RTX;
7570 rtx elt_rtx = GEN_INT (elt);
7572 if (mode == V2DFmode)
7573 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7575 else if (mode == V2DImode)
7576 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7578 else if (TARGET_P9_VECTOR && TARGET_VSX_SMALL_INTEGER
7579 && TARGET_UPPER_REGS_DI && TARGET_POWERPC64)
7581 if (mode == V4SImode)
7582 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7583 else if (mode == V8HImode)
7584 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7585 else if (mode == V16QImode)
7586 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7589 if (insn)
7591 emit_insn (insn);
7592 return;
7596 /* Simplify setting single element vectors like V1TImode. */
7597 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7599 emit_move_insn (target, gen_lowpart (mode, val));
7600 return;
7603 /* Load single variable value. */
7604 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7605 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7606 x = gen_rtx_UNSPEC (VOIDmode,
7607 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7608 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7609 gen_rtvec (2,
7610 gen_rtx_SET (reg, mem),
7611 x)));
7613 /* Linear sequence. */
7614 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7615 for (i = 0; i < 16; ++i)
7616 XVECEXP (mask, 0, i) = GEN_INT (i);
7618 /* Set permute mask to insert element into target. */
7619 for (i = 0; i < width; ++i)
7620 XVECEXP (mask, 0, elt*width + i)
7621 = GEN_INT (i + 0x10);
7622 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7624 if (BYTES_BIG_ENDIAN)
7625 x = gen_rtx_UNSPEC (mode,
7626 gen_rtvec (3, target, reg,
7627 force_reg (V16QImode, x)),
7628 UNSPEC_VPERM);
7629 else
7631 if (TARGET_P9_VECTOR)
7632 x = gen_rtx_UNSPEC (mode,
7633 gen_rtvec (3, target, reg,
7634 force_reg (V16QImode, x)),
7635 UNSPEC_VPERMR);
7636 else
7638 /* Invert selector. We prefer to generate VNAND on P8 so
7639 that future fusion opportunities can kick in, but must
7640 generate VNOR elsewhere. */
7641 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7642 rtx iorx = (TARGET_P8_VECTOR
7643 ? gen_rtx_IOR (V16QImode, notx, notx)
7644 : gen_rtx_AND (V16QImode, notx, notx));
7645 rtx tmp = gen_reg_rtx (V16QImode);
7646 emit_insn (gen_rtx_SET (tmp, iorx));
7648 /* Permute with operands reversed and adjusted selector. */
7649 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7650 UNSPEC_VPERM);
7654 emit_insn (gen_rtx_SET (target, x));
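/* Illustrative sketch, not part of the original file: the selector built
   above is the identity byte permutation 0..15 with the bytes of element
   ELT replaced by 0x10..0x10+WIDTH-1, i.e. bytes taken from the second
   vector.  A standalone mirror (hypothetical helper): */
#if 0
static void
build_insert_selector (unsigned char sel[16], int elt, int width)
{
  int i;
  for (i = 0; i < 16; i++)
    sel[i] = i;                          /* identity: keep target bytes */
  for (i = 0; i < width; i++)
    sel[elt * width + i] = 0x10 + i;     /* pull in the new element */
}
#endif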
7657 /* Extract field ELT from VEC into TARGET. */
7659 void
7660 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7662 machine_mode mode = GET_MODE (vec);
7663 machine_mode inner_mode = GET_MODE_INNER (mode);
7664 rtx mem;
7666 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7668 switch (mode)
7670 default:
7671 break;
7672 case V1TImode:
7673 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7674 emit_move_insn (target, gen_lowpart (TImode, vec));
7675 break;
7676 case V2DFmode:
7677 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7678 return;
7679 case V2DImode:
7680 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7681 return;
7682 case V4SFmode:
7683 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7684 return;
7685 case V16QImode:
7686 if (TARGET_DIRECT_MOVE_64BIT)
7688 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7689 return;
7691 else
7692 break;
7693 case V8HImode:
7694 if (TARGET_DIRECT_MOVE_64BIT)
7696 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7697 return;
7699 else
7700 break;
7701 case V4SImode:
7702 if (TARGET_DIRECT_MOVE_64BIT)
7704 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7705 return;
7707 break;
7710 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7711 && TARGET_DIRECT_MOVE_64BIT)
7713 if (GET_MODE (elt) != DImode)
7715 rtx tmp = gen_reg_rtx (DImode);
7716 convert_move (tmp, elt, 0);
7717 elt = tmp;
7719 else if (!REG_P (elt))
7720 elt = force_reg (DImode, elt);
7722 switch (mode)
7724 case V2DFmode:
7725 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7726 return;
7728 case V2DImode:
7729 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7730 return;
7732 case V4SFmode:
7733 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7734 return;
7736 case V4SImode:
7737 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7738 return;
7740 case V8HImode:
7741 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7742 return;
7744 case V16QImode:
7745 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7746 return;
7748 default:
7749 gcc_unreachable ();
7753 gcc_assert (CONST_INT_P (elt));
7755 /* Allocate mode-sized buffer. */
7756 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7758 emit_move_insn (mem, vec);
7760 /* Add offset to field within buffer matching vector element. */
7761 mem = adjust_address_nv (mem, inner_mode,
7762 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7764 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
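/* Illustrative sketch, not part of the original file: the memory fallback
   above spills the whole vector and reads element ELT back at a byte
   offset computed as below (hypothetical helper): */
#if 0
static long
element_byte_offset (long elt, long inner_mode_size)
{
  return elt * inner_mode_size;   /* e.g. elt 3 of V4SI -> offset 12 */
}
#endif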
7767 /* Helper function to return the register number of an RTX. */
7768 static inline int
7769 regno_or_subregno (rtx op)
7771 if (REG_P (op))
7772 return REGNO (op);
7773 else if (SUBREG_P (op))
7774 return subreg_regno (op);
7775 else
7776 gcc_unreachable ();
7779 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7780 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7781 temporary (BASE_TMP) to fixup the address. Return the new memory address
7782 that is valid for reads or writes to a given register (SCALAR_REG). */
7784 static rtx
7785 rs6000_adjust_vec_address (rtx scalar_reg,
7786 rtx mem,
7787 rtx element,
7788 rtx base_tmp,
7789 machine_mode scalar_mode)
7791 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7792 rtx addr = XEXP (mem, 0);
7793 rtx element_offset;
7794 rtx new_addr;
7795 bool valid_addr_p;
7797 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7798 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7800 /* Calculate what we need to add to the address to get the element
7801 address. */
7802 if (CONST_INT_P (element))
7803 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7804 else
7806 int byte_shift = exact_log2 (scalar_size);
7807 gcc_assert (byte_shift >= 0);
7809 if (byte_shift == 0)
7810 element_offset = element;
7812 else
7814 if (TARGET_POWERPC64)
7815 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7816 else
7817 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7819 element_offset = base_tmp;
7823 /* Create the new address pointing to the element within the vector. If we
7824 are adding 0, we don't have to change the address. */
7825 if (element_offset == const0_rtx)
7826 new_addr = addr;
7828 /* A simple indirect address can be converted into a reg + offset
7829 address. */
7830 else if (REG_P (addr) || SUBREG_P (addr))
7831 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7833 /* Optimize D-FORM addresses with constant offset with a constant element, to
7834 include the element offset in the address directly. */
7835 else if (GET_CODE (addr) == PLUS)
7837 rtx op0 = XEXP (addr, 0);
7838 rtx op1 = XEXP (addr, 1);
7839 rtx insn;
7841 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7842 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7844 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7845 rtx offset_rtx = GEN_INT (offset);
7847 if (IN_RANGE (offset, -32768, 32767)
7848 && (scalar_size < 8 || (offset & 0x3) == 0))
7849 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7850 else
7852 emit_move_insn (base_tmp, offset_rtx);
7853 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7856 else
7858 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7859 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7861 /* Note, ADDI requires the register being added to be a base
7862 register. If the register was R0, load it up into the temporary
7863 and do the add. */
7864 if (op1_reg_p
7865 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7867 insn = gen_add3_insn (base_tmp, op1, element_offset);
7868 gcc_assert (insn != NULL_RTX);
7869 emit_insn (insn);
7872 else if (ele_reg_p
7873 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7875 insn = gen_add3_insn (base_tmp, element_offset, op1);
7876 gcc_assert (insn != NULL_RTX);
7877 emit_insn (insn);
7880 else
7882 emit_move_insn (base_tmp, op1);
7883 emit_insn (gen_add2_insn (base_tmp, element_offset));
7886 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7890 else
7892 emit_move_insn (base_tmp, addr);
7893 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7896 /* If we have a PLUS, we need to see whether the particular register class
7897 allows for D-FORM or X-FORM addressing. */
7898 if (GET_CODE (new_addr) == PLUS)
7900 rtx op1 = XEXP (new_addr, 1);
7901 addr_mask_type addr_mask;
7902 int scalar_regno = regno_or_subregno (scalar_reg);
7904 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7905 if (INT_REGNO_P (scalar_regno))
7906 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7908 else if (FP_REGNO_P (scalar_regno))
7909 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7911 else if (ALTIVEC_REGNO_P (scalar_regno))
7912 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7914 else
7915 gcc_unreachable ();
7917 if (REG_P (op1) || SUBREG_P (op1))
7918 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7919 else
7920 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7923 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7924 valid_addr_p = true;
7926 else
7927 valid_addr_p = false;
7929 if (!valid_addr_p)
7931 emit_move_insn (base_tmp, new_addr);
7932 new_addr = base_tmp;
7935 return change_address (mem, scalar_mode, new_addr);
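/* Illustrative sketch, not part of the original file: a standalone mirror
   (hypothetical helper) of the folded-offset test above -- a D-form
   displacement must fit in 16 signed bits, and 8-byte scalars
   additionally need a multiple-of-4 offset (DS-form).  */
#if 0
static int
dform_offset_ok (long offset, unsigned scalar_size)
{
  return offset >= -32768 && offset <= 32767
         && (scalar_size < 8 || (offset & 0x3) == 0);
}
#endif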
7938 /* Split a variable vec_extract operation into the component instructions. */
7940 void
7941 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7942 rtx tmp_altivec)
7944 machine_mode mode = GET_MODE (src);
7945 machine_mode scalar_mode = GET_MODE (dest);
7946 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7947 int byte_shift = exact_log2 (scalar_size);
7949 gcc_assert (byte_shift >= 0);
7951 /* If we are given a memory address, optimize to load just the element. We
7952 don't have to adjust the vector element number on little endian
7953 systems. */
7954 if (MEM_P (src))
7956 gcc_assert (REG_P (tmp_gpr));
7957 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7958 tmp_gpr, scalar_mode));
7959 return;
7962 else if (REG_P (src) || SUBREG_P (src))
7964 int bit_shift = byte_shift + 3;
7965 rtx element2;
7966 int dest_regno = regno_or_subregno (dest);
7967 int src_regno = regno_or_subregno (src);
7968 int element_regno = regno_or_subregno (element);
7970 gcc_assert (REG_P (tmp_gpr));
7972 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7973 a general purpose register. */
7974 if (TARGET_P9_VECTOR
7975 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7976 && INT_REGNO_P (dest_regno)
7977 && ALTIVEC_REGNO_P (src_regno)
7978 && INT_REGNO_P (element_regno))
7980 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7981 rtx element_si = gen_rtx_REG (SImode, element_regno);
7983 if (mode == V16QImode)
7984 emit_insn (VECTOR_ELT_ORDER_BIG
7985 ? gen_vextublx (dest_si, element_si, src)
7986 : gen_vextubrx (dest_si, element_si, src));
7988 else if (mode == V8HImode)
7990 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7991 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7992 emit_insn (VECTOR_ELT_ORDER_BIG
7993 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7994 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7998 else
8000 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
8001 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
8002 emit_insn (VECTOR_ELT_ORDER_BIG
8003 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
8004 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
8007 return;
8011 gcc_assert (REG_P (tmp_altivec));
8013 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
8014 an XOR; otherwise we need to subtract. The shift amount is chosen so
8015 that VSLO will shift the element into the upper position (adding 3
8016 converts a byte shift into a bit shift). */
8017 if (scalar_size == 8)
8019 if (!VECTOR_ELT_ORDER_BIG)
8021 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
8022 element2 = tmp_gpr;
8024 else
8025 element2 = element;
8027 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
8028 bit. */
8029 emit_insn (gen_rtx_SET (tmp_gpr,
8030 gen_rtx_AND (DImode,
8031 gen_rtx_ASHIFT (DImode,
8032 element2,
8033 GEN_INT (6)),
8034 GEN_INT (64))));
8036 else
8038 if (!VECTOR_ELT_ORDER_BIG)
8040 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
8042 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
8043 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
8044 element2 = tmp_gpr;
8046 else
8047 element2 = element;
8049 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
8052 /* Get the value into the lower byte of the Altivec register where VSLO
8053 expects it. */
8054 if (TARGET_P9_VECTOR)
8055 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
8056 else if (can_create_pseudo_p ())
8057 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
8058 else
8060 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8061 emit_move_insn (tmp_di, tmp_gpr);
8062 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
8065 /* Do the VSLO to get the value into the final location. */
8066 switch (mode)
8068 case V2DFmode:
8069 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
8070 return;
8072 case V2DImode:
8073 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
8074 return;
8076 case V4SFmode:
8078 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8079 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
8080 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
8081 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
8082 tmp_altivec));
8084 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
8085 return;
8088 case V4SImode:
8089 case V8HImode:
8090 case V16QImode:
8092 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
8093 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
8094 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
8095 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
8096 tmp_altivec));
8097 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
8098 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
8099 GEN_INT (64 - (8 * scalar_size))));
8100 return;
8103 default:
8104 gcc_unreachable ();
8107 return;
8109 else
8110 gcc_unreachable ();
8113 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
8114 two SImode values. */
8116 static void
8117 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
8119 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
8121 if (CONST_INT_P (si1) && CONST_INT_P (si2))
8123 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
8124 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
8126 emit_move_insn (dest, GEN_INT (const1 | const2));
8127 return;
8130 /* Put si1 into upper 32-bits of dest. */
8131 if (CONST_INT_P (si1))
8132 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
8133 else
8135 /* Generate RLDIC. */
8136 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
8137 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
8138 rtx mask_rtx = GEN_INT (mask_32bit << 32);
8139 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
8140 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
8141 emit_insn (gen_rtx_SET (dest, and_rtx));
8144 /* Put si2 into the temporary. */
8145 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
8146 if (CONST_INT_P (si2))
8147 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
8148 else
8149 emit_insn (gen_zero_extendsidi2 (tmp, si2));
8151 /* Combine the two parts. */
8152 emit_insn (gen_iordi3 (dest, dest, tmp));
8153 return;
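/* Illustrative sketch, not part of the original file: for the
   all-constant case above, the two 32-bit halves combine as below
   (hypothetical helper using plain C types): */
#if 0
static unsigned long long
combine_si_to_di (unsigned long long hi32, unsigned long long lo32)
{
  /* Upper word shifted into place, lower word masked to 32 bits.  */
  return ((hi32 & 0xffffffffULL) << 32) | (lo32 & 0xffffffffULL);
}
#endif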
8156 /* Split a V4SI initialization. */
8158 void
8159 rs6000_split_v4si_init (rtx operands[])
8161 rtx dest = operands[0];
8163 /* Destination is a GPR, build up the two DImode parts in place. */
8164 if (REG_P (dest) || SUBREG_P (dest))
8166 int d_regno = regno_or_subregno (dest);
8167 rtx scalar1 = operands[1];
8168 rtx scalar2 = operands[2];
8169 rtx scalar3 = operands[3];
8170 rtx scalar4 = operands[4];
8171 rtx tmp1 = operands[5];
8172 rtx tmp2 = operands[6];
8174 /* Even though we only need one temporary (plus the destination, which
8175 has an early clobber constraint), try to use two temporaries, one for
8176 each double word created. That way the 2nd insn scheduling pass can
8177 rearrange things so the two parts are done in parallel. */
8178 if (BYTES_BIG_ENDIAN)
8180 rtx di_lo = gen_rtx_REG (DImode, d_regno);
8181 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
8182 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
8183 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
8185 else
8187 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
8188 rtx di_hi = gen_rtx_REG (DImode, d_regno);
8189 gcc_assert (!VECTOR_ELT_ORDER_BIG);
8190 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
8191 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
8193 return;
8196 else
8197 gcc_unreachable ();
8200 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
8202 bool
8203 invalid_e500_subreg (rtx op, machine_mode mode)
8205 if (TARGET_E500_DOUBLE)
8207 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
8208 subreg:TI and reg:TF. Decimal float modes are like integer
8209 modes (only low part of each register used) for this
8210 purpose. */
8211 if (GET_CODE (op) == SUBREG
8212 && (mode == SImode || mode == DImode || mode == TImode
8213 || mode == DDmode || mode == TDmode || mode == PTImode)
8214 && REG_P (SUBREG_REG (op))
8215 && (GET_MODE (SUBREG_REG (op)) == DFmode
8216 || GET_MODE (SUBREG_REG (op)) == TFmode
8217 || GET_MODE (SUBREG_REG (op)) == IFmode
8218 || GET_MODE (SUBREG_REG (op)) == KFmode))
8219 return true;
8221 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
8222 reg:TI. */
8223 if (GET_CODE (op) == SUBREG
8224 && (mode == DFmode || mode == TFmode || mode == IFmode
8225 || mode == KFmode)
8226 && REG_P (SUBREG_REG (op))
8227 && (GET_MODE (SUBREG_REG (op)) == DImode
8228 || GET_MODE (SUBREG_REG (op)) == TImode
8229 || GET_MODE (SUBREG_REG (op)) == PTImode
8230 || GET_MODE (SUBREG_REG (op)) == DDmode
8231 || GET_MODE (SUBREG_REG (op)) == TDmode))
8232 return true;
8235 if (TARGET_SPE
8236 && GET_CODE (op) == SUBREG
8237 && mode == SImode
8238 && REG_P (SUBREG_REG (op))
8239 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
8240 return true;
8242 return false;
8245 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
8246 selects whether the alignment is ABI-mandated, optional, or
8247 both ABI and optional alignment. */
8249 unsigned int
8250 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
8252 if (how != align_opt)
8254 if (TREE_CODE (type) == VECTOR_TYPE)
8256 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
8257 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
8259 if (align < 64)
8260 align = 64;
8262 else if (align < 128)
8263 align = 128;
8265 else if (TARGET_E500_DOUBLE
8266 && TREE_CODE (type) == REAL_TYPE
8267 && TYPE_MODE (type) == DFmode)
8269 if (align < 64)
8270 align = 64;
8274 if (how != align_abi)
8276 if (TREE_CODE (type) == ARRAY_TYPE
8277 && TYPE_MODE (TREE_TYPE (type)) == QImode)
8279 if (align < BITS_PER_WORD)
8280 align = BITS_PER_WORD;
8284 return align;
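/* Illustrative sketch, not part of the original file: the ABI-mandated
   branch above clamps vector alignment as follows (hypothetical
   standalone mirror): */
#if 0
static unsigned int
vector_type_alignment (int spe_or_paired, unsigned int align)
{
  if (spe_or_paired)
    return align < 64 ? 64 : align;     /* 8-byte SPE/paired vectors */
  return align < 128 ? 128 : align;     /* 16-byte AltiVec/VSX vectors */
}
#endif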
8287 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
8289 bool
8290 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
8292 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
8294 if (computed != 128)
8296 static bool warned;
8297 if (!warned && warn_psabi)
8299 warned = true;
8300 inform (input_location,
8301 "the layout of aggregates containing vectors with"
8302 " %d-byte alignment has changed in GCC 5",
8303 computed / BITS_PER_UNIT);
8306 /* In current GCC there is no special case. */
8307 return false;
8310 return false;
8313 /* AIX increases natural record alignment to doubleword if the first
8314 field is an FP double while the FP fields remain word aligned. */
8316 unsigned int
8317 rs6000_special_round_type_align (tree type, unsigned int computed,
8318 unsigned int specified)
8320 unsigned int align = MAX (computed, specified);
8321 tree field = TYPE_FIELDS (type);
8323 /* Skip all non-field decls. */
8324 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8325 field = DECL_CHAIN (field);
8327 if (field != NULL && field != type)
8329 type = TREE_TYPE (field);
8330 while (TREE_CODE (type) == ARRAY_TYPE)
8331 type = TREE_TYPE (type);
8333 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
8334 align = MAX (align, 64);
8337 return align;
8340 /* Darwin increases record alignment to the natural alignment of
8341 the first field. */
8343 unsigned int
8344 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
8345 unsigned int specified)
8347 unsigned int align = MAX (computed, specified);
8349 if (TYPE_PACKED (type))
8350 return align;
8352 /* Find the first field, looking down into aggregates. */
8353 do {
8354 tree field = TYPE_FIELDS (type);
8355 /* Skip all non-field decls. */
8356 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8357 field = DECL_CHAIN (field);
8358 if (! field)
8359 break;
8360 /* A packed field does not contribute any extra alignment. */
8361 if (DECL_PACKED (field))
8362 return align;
8363 type = TREE_TYPE (field);
8364 while (TREE_CODE (type) == ARRAY_TYPE)
8365 type = TREE_TYPE (type);
8366 } while (AGGREGATE_TYPE_P (type));
8368 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8369 align = MAX (align, TYPE_ALIGN (type));
8371 return align;
8374 /* Return 1 for an operand in small memory on V.4/eabi. */
8376 int
8377 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8378 machine_mode mode ATTRIBUTE_UNUSED)
8380 #if TARGET_ELF
8381 rtx sym_ref;
8383 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8384 return 0;
8386 if (DEFAULT_ABI != ABI_V4)
8387 return 0;
8389 /* Vector and float memory instructions have a limited offset on the
8390 SPE, so using a vector or float variable directly as an operand is
8391 not useful. */
8392 if (TARGET_SPE
8393 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
8394 return 0;
8396 if (GET_CODE (op) == SYMBOL_REF)
8397 sym_ref = op;
8399 else if (GET_CODE (op) != CONST
8400 || GET_CODE (XEXP (op, 0)) != PLUS
8401 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8402 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8403 return 0;
8405 else
8407 rtx sum = XEXP (op, 0);
8408 HOST_WIDE_INT summand;
8410 /* We have to be careful here, because it is the referenced address
8411 that must be 32k from _SDA_BASE_, not just the symbol. */
8412 summand = INTVAL (XEXP (sum, 1));
8413 if (summand < 0 || summand > g_switch_value)
8414 return 0;
8416 sym_ref = XEXP (sum, 0);
8419 return SYMBOL_REF_SMALL_P (sym_ref);
8420 #else
8421 return 0;
8422 #endif
8425 /* Return true if either operand is a general purpose register. */
8427 bool
8428 gpr_or_gpr_p (rtx op0, rtx op1)
8430 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8431 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8434 /* Return true if this is a move direct operation between GPR registers and
8435 floating point/VSX registers. */
8437 bool
8438 direct_move_p (rtx op0, rtx op1)
8440 int regno0, regno1;
8442 if (!REG_P (op0) || !REG_P (op1))
8443 return false;
8445 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8446 return false;
8448 regno0 = REGNO (op0);
8449 regno1 = REGNO (op1);
8450 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8451 return false;
8453 if (INT_REGNO_P (regno0))
8454 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8456 else if (INT_REGNO_P (regno1))
8458 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8459 return true;
8461 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8462 return true;
8465 return false;
8468 /* Return true if the OFFSET is valid for the quad address instructions that
8469 use d-form (register + offset) addressing. */
8471 static inline bool
8472 quad_address_offset_p (HOST_WIDE_INT offset)
8474 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
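/* Illustrative sketch, not part of the original file: equivalently, a
   DQ-form displacement is a multiple of 16 in [-32768, 32752]
   (hypothetical standalone mirror): */
#if 0
static int
dq_form_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}
#endif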
8477 /* Return true if the ADDR is an acceptable address for a quad memory
8478 operation of mode MODE (either LQ/STQ for general purpose registers, or
8479 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
8480 base register must pass the strict register check, as is required
8481 after reload. */
8483 bool
8484 quad_address_p (rtx addr, machine_mode mode, bool strict)
8486 rtx op0, op1;
8488 if (GET_MODE_SIZE (mode) != 16)
8489 return false;
8491 if (legitimate_indirect_address_p (addr, strict))
8492 return true;
8494 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8495 return false;
8497 if (GET_CODE (addr) != PLUS)
8498 return false;
8500 op0 = XEXP (addr, 0);
8501 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8502 return false;
8504 op1 = XEXP (addr, 1);
8505 if (!CONST_INT_P (op1))
8506 return false;
8508 return quad_address_offset_p (INTVAL (op1));
8511 /* Return true if this is a load or store quad operation. This function does
8512 not handle the atomic quad memory instructions. */
8514 bool
8515 quad_load_store_p (rtx op0, rtx op1)
8517 bool ret;
8519 if (!TARGET_QUAD_MEMORY)
8520 ret = false;
8522 else if (REG_P (op0) && MEM_P (op1))
8523 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8524 && quad_memory_operand (op1, GET_MODE (op1))
8525 && !reg_overlap_mentioned_p (op0, op1));
8527 else if (MEM_P (op0) && REG_P (op1))
8528 ret = (quad_memory_operand (op0, GET_MODE (op0))
8529 && quad_int_reg_operand (op1, GET_MODE (op1)));
8531 else
8532 ret = false;
8534 if (TARGET_DEBUG_ADDR)
8536 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8537 ret ? "true" : "false");
8538 debug_rtx (gen_rtx_SET (op0, op1));
8541 return ret;
8544 /* Given an address, return a constant offset term if one exists. */
8546 static rtx
8547 address_offset (rtx op)
8549 if (GET_CODE (op) == PRE_INC
8550 || GET_CODE (op) == PRE_DEC)
8551 op = XEXP (op, 0);
8552 else if (GET_CODE (op) == PRE_MODIFY
8553 || GET_CODE (op) == LO_SUM)
8554 op = XEXP (op, 1);
8556 if (GET_CODE (op) == CONST)
8557 op = XEXP (op, 0);
8559 if (GET_CODE (op) == PLUS)
8560 op = XEXP (op, 1);
8562 if (CONST_INT_P (op))
8563 return op;
8565 return NULL_RTX;
8568 /* Return true if the MEM operand is a memory operand suitable for use
8569 with a (full width, possibly multiple) gpr load/store. On
8570 powerpc64 this means the offset must be divisible by 4.
8571 Implements 'Y' constraint.
8573 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8574 a constraint function we know the operand has satisfied a suitable
8575 memory predicate. Also accept some odd rtl generated by reload
8576 (see rs6000_legitimize_reload_address for various forms). It is
8577 important that reload rtl be accepted by appropriate constraints
8578 but not by the operand predicate.
8580 Offsetting a lo_sum should not be allowed, except where we know by
8581 alignment that a 32k boundary is not crossed, but see the ???
8582 comment in rs6000_legitimize_reload_address. Note that by
8583 "offsetting" here we mean a further offset to access parts of the
8584 MEM. It's fine to have a lo_sum where the inner address is offset
8585 from a sym, since the same sym+offset will appear in the high part
8586 of the address calculation. */
8588 bool
8589 mem_operand_gpr (rtx op, machine_mode mode)
8591 unsigned HOST_WIDE_INT offset;
8592 int extra;
8593 rtx addr = XEXP (op, 0);
8595 op = address_offset (addr);
8596 if (op == NULL_RTX)
8597 return true;
8599 offset = INTVAL (op);
8600 if (TARGET_POWERPC64 && (offset & 3) != 0)
8601 return false;
8603 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8604 if (extra < 0)
8605 extra = 0;
8607 if (GET_CODE (addr) == LO_SUM)
8608 /* For lo_sum addresses, we must allow any offset except one that
8609 causes a wrap, so test only the low 16 bits. */
8610 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8612 return offset + 0x8000 < 0x10000u - extra;
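/* Illustrative sketch, not part of the original file: a standalone mirror
   (hypothetical helper) of the range test above.  The XOR/subtract idiom
   sign-extends the low 16 bits of a lo_sum offset, and the final compare
   checks that OFFSET .. OFFSET+EXTRA stays within the signed 16-bit
   displacement range.  */
#if 0
static int
gpr_offset_ok (unsigned long long offset, int extra, int is_lo_sum)
{
  if (is_lo_sum)
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;  /* sign extend */
  return offset + 0x8000 < 0x10000u - extra;
}
#endif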
8615 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8616 enforce an offset divisible by 4 even for 32-bit. */
8618 bool
8619 mem_operand_ds_form (rtx op, machine_mode mode)
8621 unsigned HOST_WIDE_INT offset;
8622 int extra;
8623 rtx addr = XEXP (op, 0);
8625 if (!offsettable_address_p (false, mode, addr))
8626 return false;
8628 op = address_offset (addr);
8629 if (op == NULL_RTX)
8630 return true;
8632 offset = INTVAL (op);
8633 if ((offset & 3) != 0)
8634 return false;
8636 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8637 if (extra < 0)
8638 extra = 0;
8640 if (GET_CODE (addr) == LO_SUM)
8641 /* For lo_sum addresses, we must allow any offset except one that
8642 causes a wrap, so test only the low 16 bits. */
8643 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8645 return offset + 0x8000 < 0x10000u - extra;
8648 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8650 static bool
8651 reg_offset_addressing_ok_p (machine_mode mode)
8653 switch (mode)
8655 case V16QImode:
8656 case V8HImode:
8657 case V4SFmode:
8658 case V4SImode:
8659 case V2DFmode:
8660 case V2DImode:
8661 case V1TImode:
8662 case TImode:
8663 case TFmode:
8664 case KFmode:
8665 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8666 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8667 a vector mode, if we want to use the VSX registers to move it around,
8668 we need to restrict ourselves to reg+reg addressing. Similarly for
8669 IEEE 128-bit floating point that is passed in a single vector
8670 register. */
8671 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8672 return mode_supports_vsx_dform_quad (mode);
8673 break;
8675 case V4HImode:
8676 case V2SImode:
8677 case V1DImode:
8678 case V2SFmode:
8679 /* Paired vector modes. Only reg+reg addressing is valid. */
8680 if (TARGET_PAIRED_FLOAT)
8681 return false;
8682 break;
8684 case SDmode:
8685 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8686 addressing for the LFIWZX and STFIWX instructions. */
8687 if (TARGET_NO_SDMODE_STACK)
8688 return false;
8689 break;
8691 default:
8692 break;
8695 return true;
8698 static bool
8699 virtual_stack_registers_memory_p (rtx op)
8701 int regnum;
8703 if (GET_CODE (op) == REG)
8704 regnum = REGNO (op);
8706 else if (GET_CODE (op) == PLUS
8707 && GET_CODE (XEXP (op, 0)) == REG
8708 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8709 regnum = REGNO (XEXP (op, 0));
8711 else
8712 return false;
8714 return (regnum >= FIRST_VIRTUAL_REGISTER
8715 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8718 /* Return true if a MODE-sized memory access to OP plus OFFSET
8719 is known not to straddle a 32k boundary. This function is used
8720 to determine whether -mcmodel=medium code can use TOC pointer
8721 relative addressing for OP. This means the alignment of the TOC
8722 pointer must also be taken into account, and unfortunately that is
8723 only 8 bytes. */
8725 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8726 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8727 #endif
8729 static bool
8730 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8731 machine_mode mode)
8733 tree decl;
8734 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8736 if (GET_CODE (op) != SYMBOL_REF)
8737 return false;
8739 /* ISA 3.0 vector d-form addressing is restricted; don't allow
8740 SYMBOL_REF. */
8741 if (mode_supports_vsx_dform_quad (mode))
8742 return false;
8744 dsize = GET_MODE_SIZE (mode);
8745 decl = SYMBOL_REF_DECL (op);
8746 if (!decl)
8748 if (dsize == 0)
8749 return false;
8751 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8752 replacing memory addresses with an anchor plus offset. We
8753 could find the decl by rummaging around in the block->objects
8754 VEC for the given offset but that seems like too much work. */
8755 dalign = BITS_PER_UNIT;
8756 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8757 && SYMBOL_REF_ANCHOR_P (op)
8758 && SYMBOL_REF_BLOCK (op) != NULL)
8760 struct object_block *block = SYMBOL_REF_BLOCK (op);
8762 dalign = block->alignment;
8763 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8765 else if (CONSTANT_POOL_ADDRESS_P (op))
8767 /* It would be nice to have get_pool_align().. */
8768 machine_mode cmode = get_pool_mode (op);
8770 dalign = GET_MODE_ALIGNMENT (cmode);
8773 else if (DECL_P (decl))
8775 dalign = DECL_ALIGN (decl);
8777 if (dsize == 0)
8779 /* Allow BLKmode when the entire object is known to not
8780 cross a 32k boundary. */
8781 if (!DECL_SIZE_UNIT (decl))
8782 return false;
8784 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8785 return false;
8787 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8788 if (dsize > 32768)
8789 return false;
8791 dalign /= BITS_PER_UNIT;
8792 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8793 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8794 return dalign >= dsize;
8797 else
8798 gcc_unreachable ();
8800 /* Find how many bits of the alignment we know for this access. */
8801 dalign /= BITS_PER_UNIT;
8802 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8803 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8804 mask = dalign - 1;
8805 lsb = offset & -offset;
8806 mask &= lsb - 1;
8807 dalign = mask + 1;
8809 return dalign >= dsize;
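/* Illustrative sketch, not part of the original file: a standalone mirror
   (hypothetical helper) of the alignment arithmetic above.  The alignment
   provable for SYM+OFFSET is the declared alignment capped by the lowest
   set bit of OFFSET.  */
#if 0
static unsigned long long
known_byte_alignment (unsigned long long dalign, unsigned long long offset)
{
  unsigned long long lsb = offset & -offset;       /* lowest set bit */
  unsigned long long mask = (dalign - 1) & (lsb - 1);
  return mask + 1;                                 /* == dalign if offset == 0 */
}
#endif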
8812 static bool
8813 constant_pool_expr_p (rtx op)
8815 rtx base, offset;
8817 split_const (op, &base, &offset);
8818 return (GET_CODE (base) == SYMBOL_REF
8819 && CONSTANT_POOL_ADDRESS_P (base)
8820 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8823 static const_rtx tocrel_base, tocrel_offset;
8825 /* Return true if OP is a toc pointer relative address (the output
8826 of create_TOC_reference). If STRICT, do not match non-split
8827 -mcmodel=large/medium toc pointer relative addresses. */
8829 bool
8830 toc_relative_expr_p (const_rtx op, bool strict)
8832 if (!TARGET_TOC)
8833 return false;
8835 if (TARGET_CMODEL != CMODEL_SMALL)
8837 /* When strict, ensure we have everything tidy. */
8838 if (strict
8839 && !(GET_CODE (op) == LO_SUM
8840 && REG_P (XEXP (op, 0))
8841 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8842 return false;
8844 /* When not strict, allow non-split TOC addresses and also allow
8845 (lo_sum (high ..)) TOC addresses created during reload. */
8846 if (GET_CODE (op) == LO_SUM)
8847 op = XEXP (op, 1);
8850 tocrel_base = op;
8851 tocrel_offset = const0_rtx;
8852 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8854 tocrel_base = XEXP (op, 0);
8855 tocrel_offset = XEXP (op, 1);
8858 return (GET_CODE (tocrel_base) == UNSPEC
8859 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8862 /* Return true if X is a constant pool address, and also for cmodel=medium
8863 if X is a toc-relative address known to be offsettable within MODE. */
8865 bool
8866 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8867 bool strict)
8869 return (toc_relative_expr_p (x, strict)
8870 && (TARGET_CMODEL != CMODEL_MEDIUM
8871 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8872 || mode == QImode
8873 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8874 INTVAL (tocrel_offset), mode)));
8877 static bool
8878 legitimate_small_data_p (machine_mode mode, rtx x)
8880 return (DEFAULT_ABI == ABI_V4
8881 && !flag_pic && !TARGET_TOC
8882 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8883 && small_data_operand (x, mode));
8886 /* SPE offset addressing is limited to 5-bits worth of double words. */
8887 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
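/* Illustrative note, not part of the original file: the mask test above
   accepts exactly the multiples of 8 from 0 to 248, i.e. a 5-bit
   double-word index scaled by 8.  A standalone mirror (hypothetical
   helper): */
#if 0
static int
spe_const_offset_ok (long x)
{
  return x >= 0 && x <= 248 && (x & 0x7) == 0;  /* == ((x & ~0xf8) == 0) */
}
#endif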
8889 bool
8890 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8891 bool strict, bool worst_case)
8893 unsigned HOST_WIDE_INT offset;
8894 unsigned int extra;
8896 if (GET_CODE (x) != PLUS)
8897 return false;
8898 if (!REG_P (XEXP (x, 0)))
8899 return false;
8900 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8901 return false;
8902 if (mode_supports_vsx_dform_quad (mode))
8903 return quad_address_p (x, mode, strict);
8904 if (!reg_offset_addressing_ok_p (mode))
8905 return virtual_stack_registers_memory_p (x);
8906 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8907 return true;
8908 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8909 return false;
8911 offset = INTVAL (XEXP (x, 1));
8912 extra = 0;
8913 switch (mode)
8915 case V4HImode:
8916 case V2SImode:
8917 case V1DImode:
8918 case V2SFmode:
8919 /* SPE vector modes. */
8920 return SPE_CONST_OFFSET_OK (offset);
8922 case DFmode:
8923 case DDmode:
8924 case DImode:
8925 /* On e500v2, we may have:
8927 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
8929 which gets addressed with evldd instructions. */
8930 if (TARGET_E500_DOUBLE)
8931 return SPE_CONST_OFFSET_OK (offset);
8933 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8934 addressing. */
8935 if (VECTOR_MEM_VSX_P (mode))
8936 return false;
8938 if (!worst_case)
8939 break;
8940 if (!TARGET_POWERPC64)
8941 extra = 4;
8942 else if (offset & 3)
8943 return false;
8944 break;
8946 case TFmode:
8947 case IFmode:
8948 case KFmode:
8949 case TDmode:
8950 case TImode:
8951 case PTImode:
8952 if (TARGET_E500_DOUBLE)
8953 return (SPE_CONST_OFFSET_OK (offset)
8954 && SPE_CONST_OFFSET_OK (offset + 8));
8956 extra = 8;
8957 if (!worst_case)
8958 break;
8959 if (!TARGET_POWERPC64)
8960 extra = 12;
8961 else if (offset & 3)
8962 return false;
8963 break;
8965 default:
8966 break;
8969 offset += 0x8000;
8970 return offset < 0x10000 - extra;
8973 bool
8974 legitimate_indexed_address_p (rtx x, int strict)
8976 rtx op0, op1;
8978 if (GET_CODE (x) != PLUS)
8979 return false;
8981 op0 = XEXP (x, 0);
8982 op1 = XEXP (x, 1);
8984 /* Recognize the rtl generated by reload which we know will later be
8985 replaced with proper base and index regs. */
8986 if (!strict
8987 && reload_in_progress
8988 && (REG_P (op0) || GET_CODE (op0) == PLUS)
8989 && REG_P (op1))
8990 return true;
8992 return (REG_P (op0) && REG_P (op1)
8993 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8994 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8995 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8996 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8999 bool
9000 avoiding_indexed_address_p (machine_mode mode)
9002 /* Avoid indexed addressing for modes that have non-indexed
9003 load/store instruction forms. */
9004 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
9007 bool
9008 legitimate_indirect_address_p (rtx x, int strict)
9010 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
9013 bool
9014 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
9016 if (!TARGET_MACHO || !flag_pic
9017 || mode != SImode || GET_CODE (x) != MEM)
9018 return false;
9019 x = XEXP (x, 0);
9021 if (GET_CODE (x) != LO_SUM)
9022 return false;
9023 if (GET_CODE (XEXP (x, 0)) != REG)
9024 return false;
9025 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
9026 return false;
9027 x = XEXP (x, 1);
9029 return CONSTANT_P (x);
9032 static bool
9033 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
9035 if (GET_CODE (x) != LO_SUM)
9036 return false;
9037 if (GET_CODE (XEXP (x, 0)) != REG)
9038 return false;
9039 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
9040 return false;
9041 /* Quad word addresses are restricted; we can't use LO_SUM. */
9042 if (mode_supports_vsx_dform_quad (mode))
9043 return false;
9044 /* Restrict addressing for DI because of our SUBREG hackery. */
9045 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
9046 return false;
9047 x = XEXP (x, 1);
9049 if (TARGET_ELF || TARGET_MACHO)
9051 bool large_toc_ok;
9053 if (DEFAULT_ABI == ABI_V4 && flag_pic)
9054 return false;
9055 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
9056 push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
9057 recognizes some LO_SUM addresses as valid although this
9058 function says the opposite. In most cases LRA can, through
9059 various transformations, generate correct code for address reloads;
9060 only a few LO_SUM cases are beyond it. So we need to add
9061 code analogous to that in rs6000_legitimize_reload_address for
9062 LO_SUM here, saying that some addresses are still valid. */
9063 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
9064 && small_toc_ref (x, VOIDmode));
9065 if (TARGET_TOC && ! large_toc_ok)
9066 return false;
9067 if (GET_MODE_NUNITS (mode) != 1)
9068 return false;
9069 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
9070 && !(/* ??? Assume floating point reg based on mode? */
9071 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
9072 && (mode == DFmode || mode == DDmode)))
9073 return false;
9075 return CONSTANT_P (x) || large_toc_ok;
9078 return false;
9082 /* Try machine-dependent ways of modifying an illegitimate address
9083 to be legitimate. If we find one, return the new, valid address.
9084 This is used from only one place: `memory_address' in explow.c.
9086 OLDX is the address as it was before break_out_memory_refs was
9087 called. In some cases it is useful to look at this to decide what
9088 needs to be done.
9090 It is always safe for this function to do nothing. It exists to
9091 recognize opportunities to optimize the output.
9093 On RS/6000, first check for the sum of a register with a constant
9094 integer that is out of range. If so, generate code to add the
9095 constant, with its low-order 16 bits masked off, to the register and
9096 force this result into another register (this can be done with `cau').
9097 Then generate an address of REG+(CONST&0xffff), allowing for the
9098 possibility of bit 16 being a one.
9100 Then check for the sum of a register and something not constant, try to
9101 load the other things into a register and return the sum. */
9103 static rtx
9104 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
9105 machine_mode mode)
9107 unsigned int extra;
9109 if (!reg_offset_addressing_ok_p (mode)
9110 || mode_supports_vsx_dform_quad (mode))
9112 if (virtual_stack_registers_memory_p (x))
9113 return x;
9115 /* In theory we should not be seeing addresses of the form reg+0,
9116 but just in case it is generated, optimize it away. */
9117 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
9118 return force_reg (Pmode, XEXP (x, 0));
9120 /* For TImode with load/store quad, restrict addresses to just a single
9121 pointer, so it works with both GPRs and VSX registers. */
9122 /* Make sure both operands are registers. */
9123 else if (GET_CODE (x) == PLUS
9124 && (mode != TImode || !TARGET_VSX_TIMODE))
9125 return gen_rtx_PLUS (Pmode,
9126 force_reg (Pmode, XEXP (x, 0)),
9127 force_reg (Pmode, XEXP (x, 1)));
9128 else
9129 return force_reg (Pmode, x);
9131 if (GET_CODE (x) == SYMBOL_REF)
9133 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
9134 if (model != 0)
9135 return rs6000_legitimize_tls_address (x, model);
9138 extra = 0;
9139 switch (mode)
9141 case TFmode:
9142 case TDmode:
9143 case TImode:
9144 case PTImode:
9145 case IFmode:
9146 case KFmode:
9147 /* As in legitimate_offset_address_p we do not assume
9148 worst-case. The mode here is just a hint as to the registers
9149 used. A TImode is usually in gprs, but may actually be in
9150 fprs. Leave worst-case scenario for reload to handle via
9151 insn constraints. PTImode is only GPRs. */
9152 extra = 8;
9153 break;
9154 default:
9155 break;
9158 if (GET_CODE (x) == PLUS
9159 && GET_CODE (XEXP (x, 0)) == REG
9160 && GET_CODE (XEXP (x, 1)) == CONST_INT
9161 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
9162 >= 0x10000 - extra)
9163 && !(SPE_VECTOR_MODE (mode)
9164 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
9166 HOST_WIDE_INT high_int, low_int;
9167 rtx sum;
9168 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
9169 if (low_int >= 0x8000 - extra)
9170 low_int = 0;
9171 high_int = INTVAL (XEXP (x, 1)) - low_int;
9172 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
9173 GEN_INT (high_int)), 0);
9174 return plus_constant (Pmode, sum, low_int);
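/* For example, x = (plus reg (const_int 0x18000)) with extra == 0 gives
   low_int = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000 and
   high_int = 0x18000 - (-0x8000) = 0x20000, so the high part is
   materialized with a single addis (shifted immediate 2) and the
   remaining -0x8000 fits in the signed 16-bit D field of the mem insn.  */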
9176 else if (GET_CODE (x) == PLUS
9177 && GET_CODE (XEXP (x, 0)) == REG
9178 && GET_CODE (XEXP (x, 1)) != CONST_INT
9179 && GET_MODE_NUNITS (mode) == 1
9180 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
9181 || (/* ??? Assume floating point reg based on mode? */
9182 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9183 && (mode == DFmode || mode == DDmode)))
9184 && !avoiding_indexed_address_p (mode))
9186 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
9187 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
9189 else if (SPE_VECTOR_MODE (mode)
9190 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
9192 if (mode == DImode)
9193 return x;
9194 /* We accept [reg + reg] and [reg + OFFSET]. */
9196 if (GET_CODE (x) == PLUS)
9198 rtx op1 = XEXP (x, 0);
9199 rtx op2 = XEXP (x, 1);
9200 rtx y;
9202 op1 = force_reg (Pmode, op1);
9204 if (GET_CODE (op2) != REG
9205 && (GET_CODE (op2) != CONST_INT
9206 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
9207 || (GET_MODE_SIZE (mode) > 8
9208 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
9209 op2 = force_reg (Pmode, op2);
9211 /* We can't always do [reg + reg] for these, because [reg +
9212 reg + offset] is not a legitimate addressing mode. */
9213 y = gen_rtx_PLUS (Pmode, op1, op2);
9215 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
9216 return force_reg (Pmode, y);
9217 else
9218 return y;
9221 return force_reg (Pmode, x);
9223 else if ((TARGET_ELF
9224 #if TARGET_MACHO
9225 || !MACHO_DYNAMIC_NO_PIC_P
9226 #endif
9228 && TARGET_32BIT
9229 && TARGET_NO_TOC
9230 && ! flag_pic
9231 && GET_CODE (x) != CONST_INT
9232 && GET_CODE (x) != CONST_WIDE_INT
9233 && GET_CODE (x) != CONST_DOUBLE
9234 && CONSTANT_P (x)
9235 && GET_MODE_NUNITS (mode) == 1
9236 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
9237 || (/* ??? Assume floating point reg based on mode? */
9238 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9239 && (mode == DFmode || mode == DDmode))))
9241 rtx reg = gen_reg_rtx (Pmode);
9242 if (TARGET_ELF)
9243 emit_insn (gen_elf_high (reg, x));
9244 else
9245 emit_insn (gen_macho_high (reg, x));
9246 return gen_rtx_LO_SUM (Pmode, reg, x);
9248 else if (TARGET_TOC
9249 && GET_CODE (x) == SYMBOL_REF
9250 && constant_pool_expr_p (x)
9251 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
9252 return create_TOC_reference (x, NULL_RTX);
9253 else
9254 return x;
9257 /* Debug version of rs6000_legitimize_address. */
9258 static rtx
9259 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
9261 rtx ret;
9262 rtx_insn *insns;
9264 start_sequence ();
9265 ret = rs6000_legitimize_address (x, oldx, mode);
9266 insns = get_insns ();
9267 end_sequence ();
9269 if (ret != x)
9271 fprintf (stderr,
9272 "\nrs6000_legitimize_address: mode %s, old code %s, "
9273 "new code %s, modified\n",
9274 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
9275 GET_RTX_NAME (GET_CODE (ret)));
9277 fprintf (stderr, "Original address:\n");
9278 debug_rtx (x);
9280 fprintf (stderr, "oldx:\n");
9281 debug_rtx (oldx);
9283 fprintf (stderr, "New address:\n");
9284 debug_rtx (ret);
9286 if (insns)
9288 fprintf (stderr, "Insns added:\n");
9289 debug_rtx_list (insns, 20);
9292 else
9294 fprintf (stderr,
9295 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
9296 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
9298 debug_rtx (x);
9301 if (insns)
9302 emit_insn (insns);
9304 return ret;
9307 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9308 We need to emit DTP-relative relocations. */
9310 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
9311 static void
9312 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
9314 switch (size)
9316 case 4:
9317 fputs ("\t.long\t", file);
9318 break;
9319 case 8:
9320 fputs (DOUBLE_INT_ASM_OP, file);
9321 break;
9322 default:
9323 gcc_unreachable ();
9325 output_addr_const (file, x);
9326 if (TARGET_ELF)
9327 fputs ("@dtprel+0x8000", file);
9328 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
9330 switch (SYMBOL_REF_TLS_MODEL (x))
9332 case 0:
9333 break;
9334 case TLS_MODEL_LOCAL_EXEC:
9335 fputs ("@le", file);
9336 break;
9337 case TLS_MODEL_INITIAL_EXEC:
9338 fputs ("@ie", file);
9339 break;
9340 case TLS_MODEL_GLOBAL_DYNAMIC:
9341 case TLS_MODEL_LOCAL_DYNAMIC:
9342 fputs ("@m", file);
9343 break;
9344 default:
9345 gcc_unreachable ();
9350 /* Return true if X is a symbol that refers to real (rather than emulated)
9351 TLS. */
9353 static bool
9354 rs6000_real_tls_symbol_ref_p (rtx x)
9356 return (GET_CODE (x) == SYMBOL_REF
9357 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
9360 /* In the name of slightly smaller debug output, and to cater to
9361 general assembler lossage, recognize various UNSPEC sequences
9362 and turn them back into a direct symbol reference. */
9364 static rtx
9365 rs6000_delegitimize_address (rtx orig_x)
9367 rtx x, y, offset;
9369 orig_x = delegitimize_mem_from_attrs (orig_x);
9370 x = orig_x;
9371 if (MEM_P (x))
9372 x = XEXP (x, 0);
9374 y = x;
9375 if (TARGET_CMODEL != CMODEL_SMALL
9376 && GET_CODE (y) == LO_SUM)
9377 y = XEXP (y, 1);
9379 offset = NULL_RTX;
9380 if (GET_CODE (y) == PLUS
9381 && GET_MODE (y) == Pmode
9382 && CONST_INT_P (XEXP (y, 1)))
9384 offset = XEXP (y, 1);
9385 y = XEXP (y, 0);
9388 if (GET_CODE (y) == UNSPEC
9389 && XINT (y, 1) == UNSPEC_TOCREL)
9391 y = XVECEXP (y, 0, 0);
9393 #ifdef HAVE_AS_TLS
9394 /* Do not associate thread-local symbols with the original
9395 constant pool symbol. */
9396 if (TARGET_XCOFF
9397 && GET_CODE (y) == SYMBOL_REF
9398 && CONSTANT_POOL_ADDRESS_P (y)
9399 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9400 return orig_x;
9401 #endif
9403 if (offset != NULL_RTX)
9404 y = gen_rtx_PLUS (Pmode, y, offset);
9405 if (!MEM_P (orig_x))
9406 return y;
9407 else
9408 return replace_equiv_address_nv (orig_x, y);
9411 if (TARGET_MACHO
9412 && GET_CODE (orig_x) == LO_SUM
9413 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9415 y = XEXP (XEXP (orig_x, 1), 0);
9416 if (GET_CODE (y) == UNSPEC
9417 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9418 return XVECEXP (y, 0, 0);
9421 return orig_x;
9424 /* Return true if X shouldn't be emitted into the debug info.
9425 The linker doesn't like .toc section references from
9426 .debug_* sections, so reject .toc section symbols. */
9428 static bool
9429 rs6000_const_not_ok_for_debug_p (rtx x)
9431 if (GET_CODE (x) == SYMBOL_REF
9432 && CONSTANT_POOL_ADDRESS_P (x))
9434 rtx c = get_pool_constant (x);
9435 machine_mode cmode = get_pool_mode (x);
9436 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9437 return true;
9440 return false;
9444 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9446 static bool
9447 rs6000_legitimate_combined_insn (rtx_insn *insn)
9449 int icode = INSN_CODE (insn);
9451 /* Reject creating doloop insns. Combine should not be allowed
9452 to create these for a number of reasons:
9453 1) In a nested loop, if combine creates one of these in an
9454 outer loop and the register allocator happens to allocate ctr
9455 to the outer loop insn, then the inner loop can't use ctr.
9456 Inner loops ought to be more highly optimized.
9457 2) Combine often wants to create one of these from what was
9458 originally a three insn sequence, first combining the three
9459 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9460 allocated ctr, the splitter takes us back to the three insn
9461 sequence. It's better to stop combine at the two insn
9462 sequence.
9463 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9464 insns, the register allocator sometimes uses floating point
9465 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9466 jump insn and output reloads are not implemented for jumps,
9467 the ctrsi/ctrdi splitters need to handle all possible cases.
9468 That's a pain, and it gets to be seriously difficult when a
9469 splitter that runs after reload needs memory to transfer from
9470 a gpr to an fpr. See PR70098 and PR71763, which are not fixed
9471 for the difficult case. It's better to not create problems
9472 in the first place. */
9473 if (icode != CODE_FOR_nothing
9474 && (icode == CODE_FOR_ctrsi_internal1
9475 || icode == CODE_FOR_ctrdi_internal1
9476 || icode == CODE_FOR_ctrsi_internal2
9477 || icode == CODE_FOR_ctrdi_internal2
9478 || icode == CODE_FOR_ctrsi_internal3
9479 || icode == CODE_FOR_ctrdi_internal3
9480 || icode == CODE_FOR_ctrsi_internal4
9481 || icode == CODE_FOR_ctrdi_internal4))
9482 return false;
9484 return true;
9487 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9489 static GTY(()) rtx rs6000_tls_symbol;
9490 static rtx
9491 rs6000_tls_get_addr (void)
9493 if (!rs6000_tls_symbol)
9494 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9496 return rs6000_tls_symbol;
9499 /* Construct the SYMBOL_REF for TLS GOT references. */
9501 static GTY(()) rtx rs6000_got_symbol;
9502 static rtx
9503 rs6000_got_sym (void)
9505 if (!rs6000_got_symbol)
9507 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9508 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9509 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9512 return rs6000_got_symbol;
9515 /* AIX Thread-Local Address support. */
9517 static rtx
9518 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9520 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9521 const char *name;
9522 char *tlsname;
9524 name = XSTR (addr, 0);
9525 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9526 or the symbol will be in the TLS private data section. */
9527 if (name[strlen (name) - 1] != ']'
9528 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9529 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9531 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9532 strcpy (tlsname, name);
9533 strcat (tlsname,
9534 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
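/* E.g. "foo" becomes "foo[TL]" for initialized TLS data, or
   "foo[UL]" when the variable goes in the uninitialized (bss)
   TLS section.  */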
9535 tlsaddr = copy_rtx (addr);
9536 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9538 else
9539 tlsaddr = addr;
9541 /* Place addr into TOC constant pool. */
9542 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9544 /* Output the TOC entry and create the MEM referencing the value. */
9545 if (constant_pool_expr_p (XEXP (sym, 0))
9546 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9548 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9549 mem = gen_const_mem (Pmode, tocref);
9550 set_mem_alias_set (mem, get_TOC_alias_set ());
9552 else
9553 return sym;
9555 /* Use global-dynamic for local-dynamic. */
9556 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9557 || model == TLS_MODEL_LOCAL_DYNAMIC)
9559 /* Create new TOC reference for @m symbol. */
9560 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9561 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9562 strcpy (tlsname, "*LCM");
9563 strcat (tlsname, name + 3);
9564 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9565 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9566 tocref = create_TOC_reference (modaddr, NULL_RTX);
9567 rtx modmem = gen_const_mem (Pmode, tocref);
9568 set_mem_alias_set (modmem, get_TOC_alias_set ());
9570 rtx modreg = gen_reg_rtx (Pmode);
9571 emit_insn (gen_rtx_SET (modreg, modmem));
9573 tmpreg = gen_reg_rtx (Pmode);
9574 emit_insn (gen_rtx_SET (tmpreg, mem));
9576 dest = gen_reg_rtx (Pmode);
9577 if (TARGET_32BIT)
9578 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9579 else
9580 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9581 return dest;
9583 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9584 else if (TARGET_32BIT)
9586 tlsreg = gen_reg_rtx (SImode);
9587 emit_insn (gen_tls_get_tpointer (tlsreg));
9589 else
9590 tlsreg = gen_rtx_REG (DImode, 13);
9592 /* Load the TOC value into a temporary register. */
9593 tmpreg = gen_reg_rtx (Pmode);
9594 emit_insn (gen_rtx_SET (tmpreg, mem));
9595 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9596 gen_rtx_MINUS (Pmode, addr, tlsreg));
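/* The TOC entry loaded above holds the symbol's offset from the
   TLS pointer, which is what the REG_EQUAL note (addr - tlsreg)
   records; adding the TLS pointer back below forms the address.  */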
9598 /* Add the TOC symbol value to the TLS pointer. */
9599 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9601 return dest;
9604 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9605 this (thread-local) address. */
9607 static rtx
9608 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9610 rtx dest, insn;
9612 if (TARGET_XCOFF)
9613 return rs6000_legitimize_tls_address_aix (addr, model);
9615 dest = gen_reg_rtx (Pmode);
9616 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9618 rtx tlsreg;
9620 if (TARGET_64BIT)
9622 tlsreg = gen_rtx_REG (Pmode, 13);
9623 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9625 else
9627 tlsreg = gen_rtx_REG (Pmode, 2);
9628 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9630 emit_insn (insn);
9632 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9634 rtx tlsreg, tmp;
9636 tmp = gen_reg_rtx (Pmode);
9637 if (TARGET_64BIT)
9639 tlsreg = gen_rtx_REG (Pmode, 13);
9640 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9642 else
9644 tlsreg = gen_rtx_REG (Pmode, 2);
9645 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9647 emit_insn (insn);
9648 if (TARGET_64BIT)
9649 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9650 else
9651 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9652 emit_insn (insn);
9654 else
9656 rtx r3, got, tga, tmp1, tmp2, call_insn;
9658 /* We currently use relocations like @got@tlsgd for tls, which
9659 means the linker will handle allocation of tls entries, placing
9660 them in the .got section. So use a pointer to the .got section,
9661 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9662 or to secondary GOT sections used by 32-bit -fPIC. */
9663 if (TARGET_64BIT)
9664 got = gen_rtx_REG (Pmode, 2);
9665 else
9667 if (flag_pic == 1)
9668 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9669 else
9671 rtx gsym = rs6000_got_sym ();
9672 got = gen_reg_rtx (Pmode);
9673 if (flag_pic == 0)
9674 rs6000_emit_move (got, gsym, Pmode);
9675 else
9677 rtx mem, lab;
9679 tmp1 = gen_reg_rtx (Pmode);
9680 tmp2 = gen_reg_rtx (Pmode);
9681 mem = gen_const_mem (Pmode, tmp1);
9682 lab = gen_label_rtx ();
9683 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9684 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9685 if (TARGET_LINK_STACK)
9686 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9687 emit_move_insn (tmp2, mem);
9688 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9689 set_unique_reg_note (last, REG_EQUAL, gsym);
9694 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9696 tga = rs6000_tls_get_addr ();
9697 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9698 1, const0_rtx, Pmode);
9700 r3 = gen_rtx_REG (Pmode, 3);
9701 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9703 if (TARGET_64BIT)
9704 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9705 else
9706 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9708 else if (DEFAULT_ABI == ABI_V4)
9709 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9710 else
9711 gcc_unreachable ();
9712 call_insn = last_call_insn ();
9713 PATTERN (call_insn) = insn;
9714 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9715 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9716 pic_offset_table_rtx);
9718 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9720 tga = rs6000_tls_get_addr ();
9721 tmp1 = gen_reg_rtx (Pmode);
9722 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9723 1, const0_rtx, Pmode);
9725 r3 = gen_rtx_REG (Pmode, 3);
9726 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9728 if (TARGET_64BIT)
9729 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9730 else
9731 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9733 else if (DEFAULT_ABI == ABI_V4)
9734 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9735 else
9736 gcc_unreachable ();
9737 call_insn = last_call_insn ();
9738 PATTERN (call_insn) = insn;
9739 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9740 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9741 pic_offset_table_rtx);
9743 if (rs6000_tls_size == 16)
9745 if (TARGET_64BIT)
9746 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9747 else
9748 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9750 else if (rs6000_tls_size == 32)
9752 tmp2 = gen_reg_rtx (Pmode);
9753 if (TARGET_64BIT)
9754 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9755 else
9756 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9757 emit_insn (insn);
9758 if (TARGET_64BIT)
9759 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9760 else
9761 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9763 else
9765 tmp2 = gen_reg_rtx (Pmode);
9766 if (TARGET_64BIT)
9767 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9768 else
9769 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9770 emit_insn (insn);
9771 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9773 emit_insn (insn);
9775 else
9777 /* IE, or 64-bit offset LE. */
9778 tmp2 = gen_reg_rtx (Pmode);
9779 if (TARGET_64BIT)
9780 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9781 else
9782 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9783 emit_insn (insn);
9784 if (TARGET_64BIT)
9785 insn = gen_tls_tls_64 (dest, tmp2, addr);
9786 else
9787 insn = gen_tls_tls_32 (dest, tmp2, addr);
9788 emit_insn (insn);
9792 return dest;
9795 /* Only create the global variable for the stack protect guard if we are using
9796 the global flavor of that guard. */
9797 static tree
9798 rs6000_init_stack_protect_guard (void)
9800 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9801 return default_stack_protect_guard ();
9803 return NULL_TREE;
9806 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9808 static bool
9809 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9811 if (GET_CODE (x) == HIGH
9812 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9813 return true;
9815 /* A TLS symbol in the TOC cannot contain a sum. */
9816 if (GET_CODE (x) == CONST
9817 && GET_CODE (XEXP (x, 0)) == PLUS
9818 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9819 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9820 return true;
9822 /* Do not place an ELF TLS symbol in the constant pool. */
9823 return TARGET_ELF && tls_referenced_p (x);
9826 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9827 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9828 can be addressed relative to the toc pointer. */
9830 static bool
9831 use_toc_relative_ref (rtx sym, machine_mode mode)
9833 return ((constant_pool_expr_p (sym)
9834 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9835 get_pool_mode (sym)))
9836 || (TARGET_CMODEL == CMODEL_MEDIUM
9837 && SYMBOL_REF_LOCAL_P (sym)
9838 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9841 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9842 replace the input X, or the original X if no replacement is called for.
9843 The output parameter *WIN is 1 if the calling macro should goto WIN,
9844 0 if it should not.
9846 For RS/6000, we wish to handle large displacements off a base
9847 register by splitting the addend across an addi/addis pair and the mem insn.
9848 This cuts the number of extra insns needed from 3 to 1.
9850 On Darwin, we use this to generate code for floating point constants.
9851 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9852 The Darwin code is inside #if TARGET_MACHO because only then are the
9853 machopic_* functions defined. */
9854 static rtx
9855 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9856 int opnum, int type,
9857 int ind_levels ATTRIBUTE_UNUSED, int *win)
9859 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9860 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9862 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9863 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9864 if (reg_offset_p
9865 && opnum == 1
9866 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9867 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9868 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9869 && TARGET_P9_VECTOR)
9870 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9871 && TARGET_P9_VECTOR)))
9872 reg_offset_p = false;
9874 /* We must recognize output that we have already generated ourselves. */
9875 if (GET_CODE (x) == PLUS
9876 && GET_CODE (XEXP (x, 0)) == PLUS
9877 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9878 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9879 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9881 if (TARGET_DEBUG_ADDR)
9883 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9884 debug_rtx (x);
9886 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9887 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9888 opnum, (enum reload_type) type);
9889 *win = 1;
9890 return x;
9893 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9894 if (GET_CODE (x) == LO_SUM
9895 && GET_CODE (XEXP (x, 0)) == HIGH)
9897 if (TARGET_DEBUG_ADDR)
9899 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9900 debug_rtx (x);
9902 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9903 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9904 opnum, (enum reload_type) type);
9905 *win = 1;
9906 return x;
9909 #if TARGET_MACHO
9910 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9911 && GET_CODE (x) == LO_SUM
9912 && GET_CODE (XEXP (x, 0)) == PLUS
9913 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9914 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9915 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9916 && machopic_operand_p (XEXP (x, 1)))
9918 /* Result of previous invocation of this function on Darwin
9919 floating point constant. */
9920 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9921 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9922 opnum, (enum reload_type) type);
9923 *win = 1;
9924 return x;
9926 #endif
9928 if (TARGET_CMODEL != CMODEL_SMALL
9929 && reg_offset_p
9930 && !quad_offset_p
9931 && small_toc_ref (x, VOIDmode))
9933 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9934 x = gen_rtx_LO_SUM (Pmode, hi, x);
9935 if (TARGET_DEBUG_ADDR)
9937 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9938 debug_rtx (x);
9940 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9941 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9942 opnum, (enum reload_type) type);
9943 *win = 1;
9944 return x;
9947 if (GET_CODE (x) == PLUS
9948 && REG_P (XEXP (x, 0))
9949 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9950 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9951 && CONST_INT_P (XEXP (x, 1))
9952 && reg_offset_p
9953 && !SPE_VECTOR_MODE (mode)
9954 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
9955 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9957 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9958 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9959 HOST_WIDE_INT high
9960 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
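/* For example, val = 0x12345678 splits into low = 0x5678 and
   high = 0x12340000, while val = 0x18000 splits into low = -0x8000
   and high = 0x20000; the high part is reloaded into a base reg and
   the low part stays in the mem's 16-bit displacement.  */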
9962 /* Check for 32-bit overflow or quad addresses with one of the
9963 four least significant bits set. */
9964 if (high + low != val
9965 || (quad_offset_p && (low & 0xf)))
9967 *win = 0;
9968 return x;
9971 /* Reload the high part into a base reg; leave the low part
9972 in the mem directly. */
9974 x = gen_rtx_PLUS (GET_MODE (x),
9975 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9976 GEN_INT (high)),
9977 GEN_INT (low));
9979 if (TARGET_DEBUG_ADDR)
9981 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9982 debug_rtx (x);
9984 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9985 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9986 opnum, (enum reload_type) type);
9987 *win = 1;
9988 return x;
9991 if (GET_CODE (x) == SYMBOL_REF
9992 && reg_offset_p
9993 && !quad_offset_p
9994 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9995 && !SPE_VECTOR_MODE (mode)
9996 #if TARGET_MACHO
9997 && DEFAULT_ABI == ABI_DARWIN
9998 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9999 && machopic_symbol_defined_p (x)
10000 #else
10001 && DEFAULT_ABI == ABI_V4
10002 && !flag_pic
10003 #endif
10004 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
10005 The same goes for DImode without 64-bit gprs and DFmode and DDmode
10006 without fprs.
10007 ??? Assume floating point reg based on mode? This assumption is
10008 violated by e.g. the powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
10009 where reload ends up doing a DFmode load of a constant from
10010 mem using two gprs. Unfortunately, at this point reload
10011 hasn't yet selected regs so poking around in reload data
10012 won't help and even if we could figure out the regs reliably,
10013 we'd still want to allow this transformation when the mem is
10014 naturally aligned. Since we say the address is good here, we
10015 can't disable offsets from LO_SUMs in mem_operand_gpr.
10016 FIXME: Allow offset from lo_sum for other modes too, when
10017 mem is sufficiently aligned.
10019 Also disallow this if the type can go in VMX/Altivec registers, since
10020 those registers do not have d-form (reg+offset) address modes. */
10021 && !reg_addr[mode].scalar_in_vmx_p
10022 && mode != TFmode
10023 && mode != TDmode
10024 && mode != IFmode
10025 && mode != KFmode
10026 && (mode != TImode || !TARGET_VSX_TIMODE)
10027 && mode != PTImode
10028 && (mode != DImode || TARGET_POWERPC64)
10029 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
10030 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
10032 #if TARGET_MACHO
10033 if (flag_pic)
10035 rtx offset = machopic_gen_offset (x);
10036 x = gen_rtx_LO_SUM (GET_MODE (x),
10037 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
10038 gen_rtx_HIGH (Pmode, offset)), offset);
10040 else
10041 #endif
10042 x = gen_rtx_LO_SUM (GET_MODE (x),
10043 gen_rtx_HIGH (Pmode, x), x);
10045 if (TARGET_DEBUG_ADDR)
10047 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
10048 debug_rtx (x);
10050 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10051 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10052 opnum, (enum reload_type) type);
10053 *win = 1;
10054 return x;
10057 /* Reload an offset address wrapped by an AND that represents the
10058 masking of the lower bits. Strip the outer AND and let reload
10059 convert the offset address into an indirect address. For VSX,
10060 force reload to create the address with an AND in a separate
10061 register, because we can't guarantee an altivec register will
10062 be used. */
10063 if (VECTOR_MEM_ALTIVEC_P (mode)
10064 && GET_CODE (x) == AND
10065 && GET_CODE (XEXP (x, 0)) == PLUS
10066 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
10067 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
10068 && GET_CODE (XEXP (x, 1)) == CONST_INT
10069 && INTVAL (XEXP (x, 1)) == -16)
10071 x = XEXP (x, 0);
10072 *win = 1;
10073 return x;
10076 if (TARGET_TOC
10077 && reg_offset_p
10078 && !quad_offset_p
10079 && GET_CODE (x) == SYMBOL_REF
10080 && use_toc_relative_ref (x, mode))
10082 x = create_TOC_reference (x, NULL_RTX);
10083 if (TARGET_CMODEL != CMODEL_SMALL)
10085 if (TARGET_DEBUG_ADDR)
10087 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
10088 debug_rtx (x);
10090 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
10091 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10092 opnum, (enum reload_type) type);
10094 *win = 1;
10095 return x;
10097 *win = 0;
10098 return x;
10101 /* Debug version of rs6000_legitimize_reload_address. */
10102 static rtx
10103 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
10104 int opnum, int type,
10105 int ind_levels, int *win)
10107 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
10108 ind_levels, win);
10109 fprintf (stderr,
10110 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
10111 "type = %d, ind_levels = %d, win = %d, original addr:\n",
10112 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
10113 debug_rtx (x);
10115 if (x == ret)
10116 fprintf (stderr, "Same address returned\n");
10117 else if (!ret)
10118 fprintf (stderr, "NULL returned\n");
10119 else
10121 fprintf (stderr, "New address:\n");
10122 debug_rtx (ret);
10125 return ret;
10128 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
10129 that is a valid memory address for an instruction.
10130 The MODE argument is the machine mode for the MEM expression
10131 that wants to use this address.
10133 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
10134 refers to a constant pool entry of an address (or the sum of it
10135 plus a constant), a short (16-bit signed) constant plus a register,
10136 the sum of two registers, or a register indirect, possibly with an
10137 auto-increment. For DFmode, DDmode and DImode with a constant plus
10138 register, we must ensure that both words are addressable, or that on
10139 PowerPC64 the offset is word aligned.
10141 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
10142 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
10143 because adjacent memory cells are accessed by adding word-sized offsets
10144 during assembly output. */
10145 static bool
10146 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
10148 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
10149 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
10151 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
10152 if (VECTOR_MEM_ALTIVEC_P (mode)
10153 && GET_CODE (x) == AND
10154 && GET_CODE (XEXP (x, 1)) == CONST_INT
10155 && INTVAL (XEXP (x, 1)) == -16)
10156 x = XEXP (x, 0);
10158 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
10159 return 0;
10160 if (legitimate_indirect_address_p (x, reg_ok_strict))
10161 return 1;
10162 if (TARGET_UPDATE
10163 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
10164 && mode_supports_pre_incdec_p (mode)
10165 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
10166 return 1;
10167 /* Handle restricted vector d-form offsets in ISA 3.0. */
10168 if (quad_offset_p)
10170 if (quad_address_p (x, mode, reg_ok_strict))
10171 return 1;
10173 else if (virtual_stack_registers_memory_p (x))
10174 return 1;
10176 else if (reg_offset_p)
10178 if (legitimate_small_data_p (mode, x))
10179 return 1;
10180 if (legitimate_constant_pool_address_p (x, mode,
10181 reg_ok_strict || lra_in_progress))
10182 return 1;
10183 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
10184 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
10185 return 1;
10188 /* For TImode, if we have TImode in VSX registers, only allow register
10189 indirect addresses. This will allow the values to go in either GPRs
10190 or VSX registers without reloading. The vector types would tend to
10191 go into VSX registers, so we allow REG+REG, while TImode seems
10192 somewhat split, in that some uses are GPR based, and some VSX based. */
10193 /* FIXME: We could loosen this by changing the following to
10194 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
10195 but currently we cannot allow REG+REG addressing for TImode. See
10196 PR72827 for complete details on how this ends up hoodwinking DSE. */
10197 if (mode == TImode && TARGET_VSX_TIMODE)
10198 return 0;
10199 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
10200 if (! reg_ok_strict
10201 && reg_offset_p
10202 && GET_CODE (x) == PLUS
10203 && GET_CODE (XEXP (x, 0)) == REG
10204 && (XEXP (x, 0) == virtual_stack_vars_rtx
10205 || XEXP (x, 0) == arg_pointer_rtx)
10206 && GET_CODE (XEXP (x, 1)) == CONST_INT)
10207 return 1;
10208 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
10209 return 1;
10210 if (!FLOAT128_2REG_P (mode)
10211 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
10212 || TARGET_POWERPC64
10213 || (mode != DFmode && mode != DDmode)
10214 || (TARGET_E500_DOUBLE && mode != DDmode))
10215 && (TARGET_POWERPC64 || mode != DImode)
10216 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
10217 && mode != PTImode
10218 && !avoiding_indexed_address_p (mode)
10219 && legitimate_indexed_address_p (x, reg_ok_strict))
10220 return 1;
10221 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
10222 && mode_supports_pre_modify_p (mode)
10223 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
10224 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
10225 reg_ok_strict, false)
10226 || (!avoiding_indexed_address_p (mode)
10227 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
10228 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
10229 return 1;
10230 if (reg_offset_p && !quad_offset_p
10231 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
10232 return 1;
10233 return 0;
10236 /* Debug version of rs6000_legitimate_address_p. */
10237 static bool
10238 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
10239 bool reg_ok_strict)
10241 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
10242 fprintf (stderr,
10243 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
10244 "strict = %d, reload = %s, code = %s\n",
10245 ret ? "true" : "false",
10246 GET_MODE_NAME (mode),
10247 reg_ok_strict,
10248 (reload_completed
10249 ? "after"
10250 : (reload_in_progress ? "progress" : "before")),
10251 GET_RTX_NAME (GET_CODE (x)));
10252 debug_rtx (x);
10254 return ret;
10257 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
10259 static bool
10260 rs6000_mode_dependent_address_p (const_rtx addr,
10261 addr_space_t as ATTRIBUTE_UNUSED)
10263 return rs6000_mode_dependent_address_ptr (addr);
10266 /* Go to LABEL if ADDR (a legitimate address expression)
10267 has an effect that depends on the machine mode it is used for.
10269 On the RS/6000 this is true of all integral offsets (since AltiVec
10270 and VSX modes don't allow them) and of pre-increment and decrement addresses.
10272 ??? Except that due to conceptual problems in offsettable_address_p
10273 we can't really report the problems of integral offsets. So leave
10274 this assuming that the adjustable offset must be valid for the
10275 sub-words of a TFmode operand, which is what we had before. */
10277 static bool
10278 rs6000_mode_dependent_address (const_rtx addr)
10280 switch (GET_CODE (addr))
10282 case PLUS:
10283 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
10284 is considered a legitimate address before reload, so there
10285 are no offset restrictions in that case. Note that this
10286 condition is safe in strict mode because any address involving
10287 virtual_stack_vars_rtx or arg_pointer_rtx would already have
10288 been rejected as illegitimate. */
10289 if (XEXP (addr, 0) != virtual_stack_vars_rtx
10290 && XEXP (addr, 0) != arg_pointer_rtx
10291 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
10293 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
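/* The worst-case access is to the last word of a 16-byte operand,
   at val + 12 with 32-bit regs or val + 8 with 64-bit regs; the
   address is mode-dependent if that offset no longer fits in the
   signed 16-bit D field.  */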
10294 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
10296 break;
10298 case LO_SUM:
10299 /* Anything in the constant pool is sufficiently aligned that
10300 all bytes have the same high part address. */
10301 return !legitimate_constant_pool_address_p (addr, QImode, false);
10303 /* Auto-increment cases are now treated generically in recog.c. */
10304 case PRE_MODIFY:
10305 return TARGET_UPDATE;
10307 /* AND is only allowed in Altivec loads. */
10308 case AND:
10309 return true;
10311 default:
10312 break;
10315 return false;
10318 /* Debug version of rs6000_mode_dependent_address. */
10319 static bool
10320 rs6000_debug_mode_dependent_address (const_rtx addr)
10322 bool ret = rs6000_mode_dependent_address (addr);
10324 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
10325 ret ? "true" : "false");
10326 debug_rtx (addr);
10328 return ret;
10331 /* Implement FIND_BASE_TERM. */
10334 rs6000_find_base_term (rtx op)
10336 rtx base;
10338 base = op;
10339 if (GET_CODE (base) == CONST)
10340 base = XEXP (base, 0);
10341 if (GET_CODE (base) == PLUS)
10342 base = XEXP (base, 0);
10343 if (GET_CODE (base) == UNSPEC)
10344 switch (XINT (base, 1))
10346 case UNSPEC_TOCREL:
10347 case UNSPEC_MACHOPIC_OFFSET:
10348 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
10349 for aliasing purposes. */
10350 return XVECEXP (base, 0, 0);
10353 return op;
10356 /* More elaborate version of recog's offsettable_memref_p predicate
10357 that works around the ??? note of rs6000_mode_dependent_address.
10358 In particular it accepts
10360 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
10362 in 32-bit mode, which the recog predicate rejects. */
10364 static bool
10365 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
10367 bool worst_case;
10369 if (!MEM_P (op))
10370 return false;
10372 /* First mimic offsettable_memref_p. */
10373 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
10374 return true;
10376 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
10377 the latter predicate knows nothing about the mode of the memory
10378 reference and, therefore, assumes that it is the largest supported
10379 mode (TFmode). As a consequence, legitimate offsettable memory
10380 references are rejected. rs6000_legitimate_offset_address_p contains
10381 the correct logic for the PLUS case of rs6000_mode_dependent_address,
10382 at least with a little bit of help here given that we know the
10383 actual registers used. */
10384 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
10385 || GET_MODE_SIZE (reg_mode) == 4);
10386 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
10387 true, worst_case);
10390 /* Determine the reassociation width to be used in reassociate_bb.
10391 This takes into account how many parallel operations we
10392 can actually do of a given type, and also the latency.
10394 int add/sub 6/cycle
10395 mul 2/cycle
10396 vect add/sub/mul 2/cycle
10397 fp add/sub/mul 2/cycle
10398 dfp 1/cycle
10401 static int
10402 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10403 machine_mode mode)
10405 switch (rs6000_cpu)
10407 case PROCESSOR_POWER8:
10408 case PROCESSOR_POWER9:
10409 if (DECIMAL_FLOAT_MODE_P (mode))
10410 return 1;
10411 if (VECTOR_MODE_P (mode))
10412 return 4;
10413 if (INTEGRAL_MODE_P (mode))
10414 return opc == MULT_EXPR ? 4 : 6;
10415 if (FLOAT_MODE_P (mode))
10416 return 4;
10417 break;
10418 default:
10419 break;
10421 return 1;
10424 /* Change register usage conditional on target flags. */
10425 static void
10426 rs6000_conditional_register_usage (void)
10428 int i;
10430 if (TARGET_DEBUG_TARGET)
10431 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10433 /* Set MQ register fixed (already call_used) so that it will not be
10434 allocated. */
10435 fixed_regs[64] = 1;
10437 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10438 if (TARGET_64BIT)
10439 fixed_regs[13] = call_used_regs[13]
10440 = call_really_used_regs[13] = 1;
10442 /* Conditionally disable FPRs. */
10443 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
10444 for (i = 32; i < 64; i++)
10445 fixed_regs[i] = call_used_regs[i]
10446 = call_really_used_regs[i] = 1;
10448 /* The TOC register is not killed across calls in a way that is
10449 visible to the compiler. */
10450 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10451 call_really_used_regs[2] = 0;
10453 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10454 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10456 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10457 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10458 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10459 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10461 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10462 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10463 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10464 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10466 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10467 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10468 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10470 if (TARGET_SPE)
10472 global_regs[SPEFSCR_REGNO] = 1;
10473 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
10474 registers in prologues and epilogues. We no longer use r14
10475 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
10476 pool for link-compatibility with older versions of GCC. Once
10477 "old" code has died out, we can return r14 to the allocation
10478 pool. */
10479 fixed_regs[14]
10480 = call_used_regs[14]
10481 = call_really_used_regs[14] = 1;
10484 if (!TARGET_ALTIVEC && !TARGET_VSX)
10486 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10487 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10488 call_really_used_regs[VRSAVE_REGNO] = 1;
10491 if (TARGET_ALTIVEC || TARGET_VSX)
10492 global_regs[VSCR_REGNO] = 1;
10494 if (TARGET_ALTIVEC_ABI)
10496 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10497 call_used_regs[i] = call_really_used_regs[i] = 1;
10499 /* AIX reserves VR20:31 in non-extended ABI mode. */
10500 if (TARGET_XCOFF)
10501 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10502 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10507 /* Output insns to set DEST equal to the constant SOURCE as a series of
10508 lis, ori and shl instructions and return TRUE. */
10510 bool
10511 rs6000_emit_set_const (rtx dest, rtx source)
10513 machine_mode mode = GET_MODE (dest);
10514 rtx temp, set;
10515 rtx_insn *insn;
10516 HOST_WIDE_INT c;
10518 gcc_checking_assert (CONST_INT_P (source));
10519 c = INTVAL (source);
10520 switch (mode)
10522 case QImode:
10523 case HImode:
10524 emit_insn (gen_rtx_SET (dest, source));
10525 return true;
10527 case SImode:
10528 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10530 emit_insn (gen_rtx_SET (copy_rtx (temp),
10531 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10532 emit_insn (gen_rtx_SET (dest,
10533 gen_rtx_IOR (SImode, copy_rtx (temp),
10534 GEN_INT (c & 0xffff))));
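/* E.g. c = 0x12345678 is loaded as lis temp,0x1234 followed by
   ori dest,temp,0x5678.  */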
10535 break;
10537 case DImode:
10538 if (!TARGET_POWERPC64)
10540 rtx hi, lo;
10542 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10543 DImode);
10544 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10545 DImode);
10546 emit_move_insn (hi, GEN_INT (c >> 32));
10547 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10548 emit_move_insn (lo, GEN_INT (c));
10550 else
10551 rs6000_emit_set_long_const (dest, c);
10552 break;
10554 default:
10555 gcc_unreachable ();
10558 insn = get_last_insn ();
10559 set = single_set (insn);
10560 if (! CONSTANT_P (SET_SRC (set)))
10561 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10563 return true;
10566 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10567 Output insns to set DEST equal to the constant C as a series of
10568 lis, ori and shl instructions. */
10570 static void
10571 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10573 rtx temp;
10574 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10576 ud1 = c & 0xffff;
10577 c = c >> 16;
10578 ud2 = c & 0xffff;
10579 c = c >> 16;
10580 ud3 = c & 0xffff;
10581 c = c >> 16;
10582 ud4 = c & 0xffff;
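/* E.g. c = 0x123456789abcdef0 decomposes into ud4 = 0x1234,
   ud3 = 0x5678, ud2 = 0x9abc and ud1 = 0xdef0; the cases below pick
   the shortest lis/ori/shift sequence that rebuilds C from these
   16-bit pieces.  */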
10584 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10585 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10586 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10588 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10589 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10591 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10593 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10594 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10595 if (ud1 != 0)
10596 emit_move_insn (dest,
10597 gen_rtx_IOR (DImode, copy_rtx (temp),
10598 GEN_INT (ud1)));
10600 else if (ud3 == 0 && ud4 == 0)
10602 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10604 gcc_assert (ud2 & 0x8000);
10605 emit_move_insn (copy_rtx (temp),
10606 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10607 if (ud1 != 0)
10608 emit_move_insn (copy_rtx (temp),
10609 gen_rtx_IOR (DImode, copy_rtx (temp),
10610 GEN_INT (ud1)));
10611 emit_move_insn (dest,
10612 gen_rtx_ZERO_EXTEND (DImode,
10613 gen_lowpart (SImode,
10614 copy_rtx (temp))));
10616 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10617 || (ud4 == 0 && ! (ud3 & 0x8000)))
10619 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10621 emit_move_insn (copy_rtx (temp),
10622 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10623 if (ud2 != 0)
10624 emit_move_insn (copy_rtx (temp),
10625 gen_rtx_IOR (DImode, copy_rtx (temp),
10626 GEN_INT (ud2)));
10627 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10628 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10629 GEN_INT (16)));
10630 if (ud1 != 0)
10631 emit_move_insn (dest,
10632 gen_rtx_IOR (DImode, copy_rtx (temp),
10633 GEN_INT (ud1)));
10635 else
10637 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10639 emit_move_insn (copy_rtx (temp),
10640 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10641 if (ud3 != 0)
10642 emit_move_insn (copy_rtx (temp),
10643 gen_rtx_IOR (DImode, copy_rtx (temp),
10644 GEN_INT (ud3)));
10646 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10647 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10648 GEN_INT (32)));
10649 if (ud2 != 0)
10650 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10651 gen_rtx_IOR (DImode, copy_rtx (temp),
10652 GEN_INT (ud2 << 16)));
10653 if (ud1 != 0)
10654 emit_move_insn (dest,
10655 gen_rtx_IOR (DImode, copy_rtx (temp),
10656 GEN_INT (ud1)));
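/* The general case therefore costs at most five insns; for the
   example above: lis 0x1234, ori 0x5678, sldi 32, oris 0x9abc,
   ori 0xdef0.  */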
10660 /* Helper for the following. Get rid of [r+r] memory refs
10661 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10663 static void
10664 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10666 if (reload_in_progress)
10667 return;
10669 if (GET_CODE (operands[0]) == MEM
10670 && GET_CODE (XEXP (operands[0], 0)) != REG
10671 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10672 GET_MODE (operands[0]), false))
10673 operands[0]
10674 = replace_equiv_address (operands[0],
10675 copy_addr_to_reg (XEXP (operands[0], 0)));
10677 if (GET_CODE (operands[1]) == MEM
10678 && GET_CODE (XEXP (operands[1], 0)) != REG
10679 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10680 GET_MODE (operands[1]), false))
10681 operands[1]
10682 = replace_equiv_address (operands[1],
10683 copy_addr_to_reg (XEXP (operands[1], 0)));
10686 /* Generate a vector of constants to permute MODE for a little-endian
10687 storage operation by swapping the two halves of a vector. */
10688 static rtvec
10689 rs6000_const_vec (machine_mode mode)
10691 int i, subparts;
10692 rtvec v;
10694 switch (mode)
10696 case V1TImode:
10697 subparts = 1;
10698 break;
10699 case V2DFmode:
10700 case V2DImode:
10701 subparts = 2;
10702 break;
10703 case V4SFmode:
10704 case V4SImode:
10705 subparts = 4;
10706 break;
10707 case V8HImode:
10708 subparts = 8;
10709 break;
10710 case V16QImode:
10711 subparts = 16;
10712 break;
10713 default:
10714 gcc_unreachable();
10717 v = rtvec_alloc (subparts);
10719 for (i = 0; i < subparts / 2; ++i)
10720 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10721 for (i = subparts / 2; i < subparts; ++i)
10722 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
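/* E.g. for V4SImode this yields {2, 3, 0, 1}: element I of the
   permuted vector selects element I ^ 2 of the source, i.e. the
   two 64-bit halves are swapped.  */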
10724 return v;
10727 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
10728 for a VSX load or store operation. */
10730 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
10732 /* Use ROTATE instead of VEC_SELECT for IEEE 128-bit floating point, and
10733 for 128-bit integers if they are allowed in VSX registers. */
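/* Both forms describe the same doubleword swap: rotating a 128-bit
   value by 64 bits exchanges its two halves, just as the VEC_SELECT
   with the rs6000_const_vec permutation does; ROTATE is needed
   because VEC_SELECT requires a vector mode.  */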
10734 if (FLOAT128_VECTOR_P (mode) || mode == TImode || mode == V1TImode)
10735 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
10736 else
10738 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10739 return gen_rtx_VEC_SELECT (mode, source, par);
10743 /* Emit a little-endian load from vector memory location SOURCE to VSX
10744 register DEST in mode MODE. The load is done with two permuting
10745 insns that represent an lxvd2x and an xxpermdi. */
10746 void
10747 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10749 rtx tmp, permute_mem, permute_reg;
10751 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10752 V1TImode). */
10753 if (mode == TImode || mode == V1TImode)
10755 mode = V2DImode;
10756 dest = gen_lowpart (V2DImode, dest);
10757 source = adjust_address (source, V2DImode, 0);
10760 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10761 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
10762 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
10763 emit_insn (gen_rtx_SET (tmp, permute_mem));
10764 emit_insn (gen_rtx_SET (dest, permute_reg));
10767 /* Emit a little-endian store to vector memory location DEST from VSX
10768 register SOURCE in mode MODE. The store is done with two permuting
10769 insns that represent an xxpermdi and an stxvd2x. */
10770 void
10771 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10773 rtx tmp, permute_src, permute_tmp;
10775 /* This should never be called during or after reload, because it does
10776 not re-permute the source register. It is intended only for use
10777 during expand. */
10778 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
10780 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10781 V1TImode). */
10782 if (mode == TImode || mode == V1TImode)
10784 mode = V2DImode;
10785 dest = adjust_address (dest, V2DImode, 0);
10786 source = gen_lowpart (V2DImode, source);
10789 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10790 permute_src = rs6000_gen_le_vsx_permute (source, mode);
10791 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
10792 emit_insn (gen_rtx_SET (tmp, permute_src));
10793 emit_insn (gen_rtx_SET (dest, permute_tmp));
10796 /* Emit a sequence representing a little-endian VSX load or store,
10797 moving data from SOURCE to DEST in mode MODE. This is done
10798 separately from rs6000_emit_move to ensure it is called only
10799 during expand. LE VSX loads and stores introduced later are
10800 handled with a split. The expand-time RTL generation allows
10801 us to optimize away redundant pairs of register-permutes. */
10802 void
10803 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10805 gcc_assert (!BYTES_BIG_ENDIAN
10806 && VECTOR_MEM_VSX_P (mode)
10807 && !TARGET_P9_VECTOR
10808 && !gpr_or_gpr_p (dest, source)
10809 && (MEM_P (source) ^ MEM_P (dest)));
10811 if (MEM_P (source))
10813 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10814 rs6000_emit_le_vsx_load (dest, source, mode);
10816 else
10818 if (!REG_P (source))
10819 source = force_reg (mode, source);
10820 rs6000_emit_le_vsx_store (dest, source, mode);
10824 /* Return whether a SFmode or SImode move can be done without converting one
10825 mode to another. This arises when we have:
10827 (SUBREG:SF (REG:SI ...))
10828 (SUBREG:SI (REG:SF ...))
10830 and one of the values is in a floating point/vector register, where SFmode
10831 scalars are stored in DFmode format. */
10833 bool
10834 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10836 if (TARGET_ALLOW_SF_SUBREG)
10837 return true;
10839 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10840 return true;
10842 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10843 return true;
10845 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10846 if (SUBREG_P (dest))
10848 rtx dest_subreg = SUBREG_REG (dest);
10849 rtx src_subreg = SUBREG_REG (src);
10850 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10853 return false;
10857 /* Helper function to change moves with:
10859 (SUBREG:SF (REG:SI)) and
10860 (SUBREG:SI (REG:SF))
10862 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10863 values are stored as DFmode values in the VSX registers. We need to convert
10864 the bits before we can use a direct move or operate on the bits in the
10865 vector register as an integer type.
10867 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10869 static bool
10870 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10872 if (TARGET_DIRECT_MOVE_64BIT && !reload_in_progress && !reload_completed
10873 && !lra_in_progress
10874 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10875 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10877 rtx inner_source = SUBREG_REG (source);
10878 machine_mode inner_mode = GET_MODE (inner_source);
10880 if (mode == SImode && inner_mode == SFmode)
10882 emit_insn (gen_movsi_from_sf (dest, inner_source));
10883 return true;
10886 if (mode == SFmode && inner_mode == SImode)
10888 emit_insn (gen_movsf_from_si (dest, inner_source));
10889 return true;
10893 return false;
10896 /* Emit a move from SOURCE to DEST in mode MODE. */
10897 void
10898 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10900 rtx operands[2];
10901 operands[0] = dest;
10902 operands[1] = source;
10904 if (TARGET_DEBUG_ADDR)
10906 fprintf (stderr,
10907 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
10908 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10909 GET_MODE_NAME (mode),
10910 reload_in_progress,
10911 reload_completed,
10912 can_create_pseudo_p ());
10913 debug_rtx (dest);
10914 fprintf (stderr, "source:\n");
10915 debug_rtx (source);
10918 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10919 if (CONST_WIDE_INT_P (operands[1])
10920 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10922 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10923 gcc_unreachable ();
10926 /* See if we need to special case SImode/SFmode SUBREG moves. */
10927 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10928 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10929 return;
10931 /* Check if GCC is setting up a block move that will end up using FP
10932 registers as temporaries. We must make sure this is acceptable. */
10933 if (GET_CODE (operands[0]) == MEM
10934 && GET_CODE (operands[1]) == MEM
10935 && mode == DImode
10936 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10937 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10938 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10939 ? 32 : MEM_ALIGN (operands[0])))
10940 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10941 ? 32
10942 : MEM_ALIGN (operands[1]))))
10943 && ! MEM_VOLATILE_P (operands [0])
10944 && ! MEM_VOLATILE_P (operands [1]))
10946 emit_move_insn (adjust_address (operands[0], SImode, 0),
10947 adjust_address (operands[1], SImode, 0));
10948 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10949 adjust_address (copy_rtx (operands[1]), SImode, 4));
10950 return;
10953 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10954 && !gpc_reg_operand (operands[1], mode))
10955 operands[1] = force_reg (mode, operands[1]);
10957 /* Recognize the case where operand[1] is a reference to thread-local
10958 data and load its address to a register. */
10959 if (tls_referenced_p (operands[1]))
10961 enum tls_model model;
10962 rtx tmp = operands[1];
10963 rtx addend = NULL;
10965 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10967 addend = XEXP (XEXP (tmp, 0), 1);
10968 tmp = XEXP (XEXP (tmp, 0), 0);
10971 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10972 model = SYMBOL_REF_TLS_MODEL (tmp);
10973 gcc_assert (model != 0);
10975 tmp = rs6000_legitimize_tls_address (tmp, model);
10976 if (addend)
10978 tmp = gen_rtx_PLUS (mode, tmp, addend);
10979 tmp = force_operand (tmp, operands[0]);
10981 operands[1] = tmp;
10984 /* Handle the case where reload calls us with an invalid address. */
10985 if (reload_in_progress && mode == Pmode
10986 && (! general_operand (operands[1], mode)
10987 || ! nonimmediate_operand (operands[0], mode)))
10988 goto emit_set;
10990 /* 128-bit constant floating-point values on Darwin should really be loaded
10991 as two parts. However, this premature splitting is a problem when DFmode
10992 values can go into Altivec registers. */
10993 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10994 && GET_CODE (operands[1]) == CONST_DOUBLE)
10996 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10997 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10998 DFmode);
10999 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
11000 GET_MODE_SIZE (DFmode)),
11001 simplify_gen_subreg (DFmode, operands[1], mode,
11002 GET_MODE_SIZE (DFmode)),
11003 DFmode);
11004 return;
11007 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
11008 cfun->machine->sdmode_stack_slot =
11009 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
11012 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
11013 p1:SD) if p1 is not of floating point class and p0 is spilled as
11014 we can have no analogous movsd_store for this. */
11015 if (lra_in_progress && mode == DDmode
11016 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
11017 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
11018 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
11019 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
11021 enum reg_class cl;
11022 int regno = REGNO (SUBREG_REG (operands[1]));
11024 if (regno >= FIRST_PSEUDO_REGISTER)
11026 cl = reg_preferred_class (regno);
11027 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
11029 if (regno >= 0 && ! FP_REGNO_P (regno))
11031 mode = SDmode;
11032 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
11033 operands[1] = SUBREG_REG (operands[1]);
11036 if (lra_in_progress
11037 && mode == SDmode
11038 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
11039 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
11040 && (REG_P (operands[1])
11041 || (GET_CODE (operands[1]) == SUBREG
11042 && REG_P (SUBREG_REG (operands[1])))))
11044 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
11045 ? SUBREG_REG (operands[1]) : operands[1]);
11046 enum reg_class cl;
11048 if (regno >= FIRST_PSEUDO_REGISTER)
11050 cl = reg_preferred_class (regno);
11051 gcc_assert (cl != NO_REGS);
11052 regno = ira_class_hard_regs[cl][0];
11054 if (FP_REGNO_P (regno))
11056 if (GET_MODE (operands[0]) != DDmode)
11057 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
11058 emit_insn (gen_movsd_store (operands[0], operands[1]));
11060 else if (INT_REGNO_P (regno))
11061 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
11062 else
11063 gcc_unreachable ();
11064 return;
11066 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
11067 p:DD)) if p0 is not of floating point class and p1 is spilled as
11068 we can have no analogous movsd_load for this. */
11069 if (lra_in_progress && mode == DDmode
11070 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
11071 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
11072 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
11073 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
11075 enum reg_class cl;
11076 int regno = REGNO (SUBREG_REG (operands[0]));
11078 if (regno >= FIRST_PSEUDO_REGISTER)
11080 cl = reg_preferred_class (regno);
11081 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
11083 if (regno >= 0 && ! FP_REGNO_P (regno))
11085 mode = SDmode;
11086 operands[0] = SUBREG_REG (operands[0]);
11087 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
11090 if (lra_in_progress
11091 && mode == SDmode
11092 && (REG_P (operands[0])
11093 || (GET_CODE (operands[0]) == SUBREG
11094 && REG_P (SUBREG_REG (operands[0]))))
11095 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
11096 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
11098 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
11099 ? SUBREG_REG (operands[0]) : operands[0]);
11100 enum reg_class cl;
11102 if (regno >= FIRST_PSEUDO_REGISTER)
11104 cl = reg_preferred_class (regno);
11105 gcc_assert (cl != NO_REGS);
11106 regno = ira_class_hard_regs[cl][0];
11108 if (FP_REGNO_P (regno))
11110 if (GET_MODE (operands[1]) != DDmode)
11111 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
11112 emit_insn (gen_movsd_load (operands[0], operands[1]));
11114 else if (INT_REGNO_P (regno))
11115 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
11116 else
11117 gcc_unreachable ();
11118 return;
11121 if (reload_in_progress
11122 && mode == SDmode
11123 && cfun->machine->sdmode_stack_slot != NULL_RTX
11124 && MEM_P (operands[0])
11125 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
11126 && REG_P (operands[1]))
11128 if (FP_REGNO_P (REGNO (operands[1])))
11130 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
11131 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11132 emit_insn (gen_movsd_store (mem, operands[1]));
11134 else if (INT_REGNO_P (REGNO (operands[1])))
11136 rtx mem = operands[0];
11137 if (BYTES_BIG_ENDIAN)
11138 mem = adjust_address_nv (mem, mode, 4);
11139 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11140 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
11142 else
11143 gcc_unreachable ();
11144 return;
11146 if (reload_in_progress
11147 && mode == SDmode
11148 && REG_P (operands[0])
11149 && MEM_P (operands[1])
11150 && cfun->machine->sdmode_stack_slot != NULL_RTX
11151 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
11153 if (FP_REGNO_P (REGNO (operands[0])))
11155 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
11156 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11157 emit_insn (gen_movsd_load (operands[0], mem));
11159 else if (INT_REGNO_P (REGNO (operands[0])))
11161 rtx mem = operands[1];
11162 if (BYTES_BIG_ENDIAN)
11163 mem = adjust_address_nv (mem, mode, 4);
11164 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
11165 emit_insn (gen_movsd_hardfloat (operands[0], mem));
11167 else
11168 gcc_unreachable ();
11169 return;
11172 /* FIXME: In the long term, this switch statement should go away
11173 and be replaced by a sequence of tests based on things like
11174 mode == Pmode. */
11175 switch (mode)
11177 case HImode:
11178 case QImode:
11179 if (CONSTANT_P (operands[1])
11180 && GET_CODE (operands[1]) != CONST_INT)
11181 operands[1] = force_const_mem (mode, operands[1]);
11182 break;
11184 case TFmode:
11185 case TDmode:
11186 case IFmode:
11187 case KFmode:
11188 if (FLOAT128_2REG_P (mode))
11189 rs6000_eliminate_indexed_memrefs (operands);
11190 /* fall through */
11192 case DFmode:
11193 case DDmode:
11194 case SFmode:
11195 case SDmode:
11196 if (CONSTANT_P (operands[1])
11197 && ! easy_fp_constant (operands[1], mode))
11198 operands[1] = force_const_mem (mode, operands[1]);
11199 break;
11201 case V16QImode:
11202 case V8HImode:
11203 case V4SFmode:
11204 case V4SImode:
11205 case V4HImode:
11206 case V2SFmode:
11207 case V2SImode:
11208 case V1DImode:
11209 case V2DFmode:
11210 case V2DImode:
11211 case V1TImode:
11212 if (CONSTANT_P (operands[1])
11213 && !easy_vector_constant (operands[1], mode))
11214 operands[1] = force_const_mem (mode, operands[1]);
11215 break;
11217 case SImode:
11218 case DImode:
11219 /* Use default pattern for address of ELF small data. */
11220 if (TARGET_ELF
11221 && mode == Pmode
11222 && DEFAULT_ABI == ABI_V4
11223 && (GET_CODE (operands[1]) == SYMBOL_REF
11224 || GET_CODE (operands[1]) == CONST)
11225 && small_data_operand (operands[1], mode))
11227 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11228 return;
11231 if (DEFAULT_ABI == ABI_V4
11232 && mode == Pmode && mode == SImode
11233 && flag_pic == 1 && got_operand (operands[1], mode))
11235 emit_insn (gen_movsi_got (operands[0], operands[1]));
11236 return;
11239 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
11240 && TARGET_NO_TOC
11241 && ! flag_pic
11242 && mode == Pmode
11243 && CONSTANT_P (operands[1])
11244 && GET_CODE (operands[1]) != HIGH
11245 && GET_CODE (operands[1]) != CONST_INT)
11247 rtx target = (!can_create_pseudo_p ()
11248 ? operands[0]
11249 : gen_reg_rtx (mode));
11251 /* If this is a function address on -mcall-aixdesc,
11252 convert it to the address of the descriptor. */
11253 if (DEFAULT_ABI == ABI_AIX
11254 && GET_CODE (operands[1]) == SYMBOL_REF
11255 && XSTR (operands[1], 0)[0] == '.')
11257 const char *name = XSTR (operands[1], 0);
11258 rtx new_ref;
11259 while (*name == '.')
11260 name++;
11261 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
11262 CONSTANT_POOL_ADDRESS_P (new_ref)
11263 = CONSTANT_POOL_ADDRESS_P (operands[1]);
11264 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
11265 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
11266 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
11267 operands[1] = new_ref;
11270 if (DEFAULT_ABI == ABI_DARWIN)
11272 #if TARGET_MACHO
11273 if (MACHO_DYNAMIC_NO_PIC_P)
11275 /* Take care of any required data indirection. */
11276 operands[1] = rs6000_machopic_legitimize_pic_address (
11277 operands[1], mode, operands[0]);
11278 if (operands[0] != operands[1])
11279 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11280 return;
11282 #endif
11283 emit_insn (gen_macho_high (target, operands[1]));
11284 emit_insn (gen_macho_low (operands[0], target, operands[1]));
11285 return;
11288 emit_insn (gen_elf_high (target, operands[1]));
11289 emit_insn (gen_elf_low (operands[0], target, operands[1]));
11290 return;
11293 /* If this is a SYMBOL_REF that refers to a constant pool entry,
11294 and we have put it in the TOC, we just need to make a TOC-relative
11295 reference to it. */
11296 if (TARGET_TOC
11297 && GET_CODE (operands[1]) == SYMBOL_REF
11298 && use_toc_relative_ref (operands[1], mode))
11299 operands[1] = create_TOC_reference (operands[1], operands[0]);
11300 else if (mode == Pmode
11301 && CONSTANT_P (operands[1])
11302 && GET_CODE (operands[1]) != HIGH
11303 && ((GET_CODE (operands[1]) != CONST_INT
11304 && ! easy_fp_constant (operands[1], mode))
11305 || (GET_CODE (operands[1]) == CONST_INT
11306 && (num_insns_constant (operands[1], mode)
11307 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
11308 || (GET_CODE (operands[0]) == REG
11309 && FP_REGNO_P (REGNO (operands[0]))))
11310 && !toc_relative_expr_p (operands[1], false)
11311 && (TARGET_CMODEL == CMODEL_SMALL
11312 || can_create_pseudo_p ()
11313 || (REG_P (operands[0])
11314 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
11317 #if TARGET_MACHO
11318 /* Darwin uses a special PIC legitimizer. */
11319 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
11321 operands[1] =
11322 rs6000_machopic_legitimize_pic_address (operands[1], mode,
11323 operands[0]);
11324 if (operands[0] != operands[1])
11325 emit_insn (gen_rtx_SET (operands[0], operands[1]));
11326 return;
11328 #endif
11330 /* If we are to limit the number of things we put in the TOC and
11331 this is a symbol plus a constant we can add in one insn,
11332 just put the symbol in the TOC and add the constant. Don't do
11333 this if reload is in progress. */
11334 if (GET_CODE (operands[1]) == CONST
11335 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
11336 && GET_CODE (XEXP (operands[1], 0)) == PLUS
11337 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
11338 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
11339 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
11340 && ! side_effects_p (operands[0]))
11342 rtx sym =
11343 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
11344 rtx other = XEXP (XEXP (operands[1], 0), 1);
11346 sym = force_reg (mode, sym);
11347 emit_insn (gen_add3_insn (operands[0], sym, other));
11348 return;
11351 operands[1] = force_const_mem (mode, operands[1]);
11353 if (TARGET_TOC
11354 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11355 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
11357 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
11358 operands[0]);
11359 operands[1] = gen_const_mem (mode, tocref);
11360 set_mem_alias_set (operands[1], get_TOC_alias_set ());
11363 break;
11365 case TImode:
11366 if (!VECTOR_MEM_VSX_P (TImode))
11367 rs6000_eliminate_indexed_memrefs (operands);
11368 break;
11370 case PTImode:
11371 rs6000_eliminate_indexed_memrefs (operands);
11372 break;
11374 default:
11375 fatal_insn ("bad move", gen_rtx_SET (dest, source));
11378 /* Above, we may have called force_const_mem which may have returned
11379 an invalid address. If we can, fix this up; otherwise, reload will
11380 have to deal with it. */
11381 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
11382 operands[1] = validize_mem (operands[1]);
11384 emit_set:
11385 emit_insn (gen_rtx_SET (operands[0], operands[1]));
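/* Worked example (annotation): the block-move special case near the top
   of rs6000_emit_move splits one slow unaligned DImode copy into two
   naturally aligned SImode copies, roughly:

     (set (mem:DI addr1) (mem:DI addr2))       -- slow when only
                                                  32-bit aligned
   becomes
     (set (mem:SI addr1+0) (mem:SI addr2+0))
     (set (mem:SI addr1+4) (mem:SI addr2+4))  */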
11388 /* Return true if a structure, union or array containing FIELD should be
11389 accessed using `BLKMODE'.
11391 For the SPE, simd types are V2SI, and gcc can be tempted to put the
11392 entire thing in a DI and use subregs to access the internals.
11393 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
11394 back-end. Because a single GPR can hold a V2SI, but not a DI, the
11395 best thing to do is set structs to BLKmode and avoid Severe Tire
11396 Damage.
11398 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
11399 fit into one GPR, whereas DI still needs two. */
11401 static bool
11402 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
11404 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
11405 || (TARGET_E500_DOUBLE && mode == DFmode));
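/* Annotation: for example, on SPE a struct such as

     struct s { int v __attribute__ ((vector_size (8))); };

   would naturally get DImode, but a single SPE-extended 64-bit GPR can
   hold the V2SI member while DImode still occupies two GPRs, so the hook
   forces BLKmode; the same applies to DFmode members under
   TARGET_E500_DOUBLE.  */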
11408 /* Nonzero if we can use a floating-point register to pass this arg. */
11409 #define USE_FP_FOR_ARG_P(CUM,MODE) \
11410 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
11411 && (CUM)->fregno <= FP_ARG_MAX_REG \
11412 && TARGET_HARD_FLOAT && TARGET_FPRS)
11414 /* Nonzero if we can use an AltiVec register to pass this arg. */
11415 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
11416 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
11417 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
11418 && TARGET_ALTIVEC_ABI \
11419 && (NAMED))
11421 /* Walk down the type tree of TYPE counting consecutive base elements.
11422 If *MODEP is VOIDmode, then set it to the first valid floating point
11423 or vector type. If a non-floating point or vector type is found, or
11424 if a floating point or vector type that doesn't match a non-VOIDmode
11425 *MODEP is found, then return -1, otherwise return the count in the
11426 sub-tree. */
11428 static int
11429 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
11431 machine_mode mode;
11432 HOST_WIDE_INT size;
11434 switch (TREE_CODE (type))
11436 case REAL_TYPE:
11437 mode = TYPE_MODE (type);
11438 if (!SCALAR_FLOAT_MODE_P (mode))
11439 return -1;
11441 if (*modep == VOIDmode)
11442 *modep = mode;
11444 if (*modep == mode)
11445 return 1;
11447 break;
11449 case COMPLEX_TYPE:
11450 mode = TYPE_MODE (TREE_TYPE (type));
11451 if (!SCALAR_FLOAT_MODE_P (mode))
11452 return -1;
11454 if (*modep == VOIDmode)
11455 *modep = mode;
11457 if (*modep == mode)
11458 return 2;
11460 break;
11462 case VECTOR_TYPE:
11463 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
11464 return -1;
11466 /* Use V4SImode as representative of all 128-bit vector types. */
11467 size = int_size_in_bytes (type);
11468 switch (size)
11470 case 16:
11471 mode = V4SImode;
11472 break;
11473 default:
11474 return -1;
11477 if (*modep == VOIDmode)
11478 *modep = mode;
11480 /* Vector modes are considered to be opaque: two vectors are
11481 equivalent for the purposes of being homogeneous aggregates
11482 if they are the same size. */
11483 if (*modep == mode)
11484 return 1;
11486 break;
11488 case ARRAY_TYPE:
11490 int count;
11491 tree index = TYPE_DOMAIN (type);
11493 /* Can't handle incomplete types nor sizes that are not
11494 fixed. */
11495 if (!COMPLETE_TYPE_P (type)
11496 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11497 return -1;
11499 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11500 if (count == -1
11501 || !index
11502 || !TYPE_MAX_VALUE (index)
11503 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11504 || !TYPE_MIN_VALUE (index)
11505 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11506 || count < 0)
11507 return -1;
11509 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11510 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11512 /* There must be no padding. */
11513 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11514 return -1;
11516 return count;
11519 case RECORD_TYPE:
11521 int count = 0;
11522 int sub_count;
11523 tree field;
11525 /* Can't handle incomplete types nor sizes that are not
11526 fixed. */
11527 if (!COMPLETE_TYPE_P (type)
11528 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11529 return -1;
11531 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11533 if (TREE_CODE (field) != FIELD_DECL)
11534 continue;
11536 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11537 if (sub_count < 0)
11538 return -1;
11539 count += sub_count;
11542 /* There must be no padding. */
11543 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11544 return -1;
11546 return count;
11549 case UNION_TYPE:
11550 case QUAL_UNION_TYPE:
11552 /* These aren't very interesting except in a degenerate case. */
11553 int count = 0;
11554 int sub_count;
11555 tree field;
11557 /* Can't handle incomplete types nor sizes that are not
11558 fixed. */
11559 if (!COMPLETE_TYPE_P (type)
11560 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11561 return -1;
11563 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11565 if (TREE_CODE (field) != FIELD_DECL)
11566 continue;
11568 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11569 if (sub_count < 0)
11570 return -1;
11571 count = count > sub_count ? count : sub_count;
11574 /* There must be no padding. */
11575 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11576 return -1;
11578 return count;
11581 default:
11582 break;
11585 return -1;
11588 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11589 float or vector aggregate that shall be passed in FP/vector registers
11590 according to the ELFv2 ABI, return the homogeneous element mode in
11591 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11593 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11595 static bool
11596 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11597 machine_mode *elt_mode,
11598 int *n_elts)
11600 /* Note that we do not accept complex types at the top level as
11601 homogeneous aggregates; these types are handled via the
11602 targetm.calls.split_complex_arg mechanism. Complex types
11603 can be elements of homogeneous aggregates, however. */
11604 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11606 machine_mode field_mode = VOIDmode;
11607 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11609 if (field_count > 0)
11611 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11612 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11614 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11615 up to AGGR_ARG_NUM_REG registers. */
11616 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11618 if (elt_mode)
11619 *elt_mode = field_mode;
11620 if (n_elts)
11621 *n_elts = field_count;
11622 return true;
11627 if (elt_mode)
11628 *elt_mode = mode;
11629 if (n_elts)
11630 *n_elts = 1;
11631 return false;
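/* Worked examples (annotation; type names are illustrative only):

     struct hfa { double x, y, z; };    -> *elt_mode = DFmode,   *n_elts = 3
     struct hva { __vector int a, b; }; -> *elt_mode = V4SImode, *n_elts = 2
     struct mix { double d; int i; };   -> not homogeneous; returns false

   The first two fit within AGGR_ARG_NUM_REG registers and so go to
   FPRs/VRs under ELFv2; the last falls back to the normal GPR rules.  */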
11634 /* Return a nonzero value to say to return the function value in
11635 memory, just as large structures are always returned. TYPE will be
11636 the data type of the value, and FNTYPE will be the type of the
11637 function doing the returning, or @code{NULL} for libcalls.
11639 The AIX ABI for the RS/6000 specifies that all structures are
11640 returned in memory. The Darwin ABI does the same.
11642 For the Darwin 64 Bit ABI, a function result can be returned in
11643 registers or in memory, depending on the size of the return data
11644 type. If it is returned in registers, the value occupies the same
11645 registers as it would if it were the first and only function
11646 argument. Otherwise, the function places its result in memory at
11647 the location pointed to by GPR3.
11649 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11650 but a draft put them in memory, and GCC used to implement the draft
11651 instead of the final standard. Therefore, aix_struct_return
11652 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11653 compatibility can change DRAFT_V4_STRUCT_RET to override the
11654 default, and -m switches get the final word. See
11655 rs6000_option_override_internal for more details.
11657 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11658 long double support is enabled. These values are returned in memory.
11660 int_size_in_bytes returns -1 for variable size objects, which go in
11661 memory always. The cast to unsigned makes -1 > 8. */
11663 static bool
11664 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11666 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11667 if (TARGET_MACHO
11668 && rs6000_darwin64_abi
11669 && TREE_CODE (type) == RECORD_TYPE
11670 && int_size_in_bytes (type) > 0)
11672 CUMULATIVE_ARGS valcum;
11673 rtx valret;
11675 valcum.words = 0;
11676 valcum.fregno = FP_ARG_MIN_REG;
11677 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11678 /* Do a trial code generation as if this were going to be passed
11679 as an argument; if any part goes in memory, we return NULL. */
11680 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11681 if (valret)
11682 return false;
11683 /* Otherwise fall through to more conventional ABI rules. */
11686 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11687 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11688 NULL, NULL))
11689 return false;
11691 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
11692 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11693 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11694 return false;
11696 if (AGGREGATE_TYPE_P (type)
11697 && (aix_struct_return
11698 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11699 return true;
11701 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11702 modes only exist for GCC vector types if -maltivec. */
11703 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11704 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11705 return false;
11707 /* Return synthetic vectors in memory. */
11708 if (TREE_CODE (type) == VECTOR_TYPE
11709 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11711 static bool warned_for_return_big_vectors = false;
11712 if (!warned_for_return_big_vectors)
11714 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11715 "non-standard ABI extension with no compatibility guarantee");
11716 warned_for_return_big_vectors = true;
11718 return true;
11721 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11722 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11723 return true;
11725 return false;
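/* Annotation, a few concrete outcomes of the rules above:

     ELFv2:  struct { double a, b; }  -> registers (homogeneous aggregate)
     ELFv2:  struct { char c[16]; }   -> registers (size <= 16 bytes)
     ELFv2:  struct { char c[24]; }   -> memory
     AIX / Darwin:  any struct        -> memory (aix_struct_return)  */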
11728 /* Specify whether values returned in registers should be at the most
11729 significant end of a register. We want aggregates returned by
11730 value to match the way aggregates are passed to functions. */
11732 static bool
11733 rs6000_return_in_msb (const_tree valtype)
11735 return (DEFAULT_ABI == ABI_ELFv2
11736 && BYTES_BIG_ENDIAN
11737 && AGGREGATE_TYPE_P (valtype)
11738 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
11741 #ifdef HAVE_AS_GNU_ATTRIBUTE
11742 /* Return TRUE if a call to function FNDECL may be one that
11743 potentially affects the function calling ABI of the object file. */
11745 static bool
11746 call_ABI_of_interest (tree fndecl)
11748 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11750 struct cgraph_node *c_node;
11752 /* Libcalls are always interesting. */
11753 if (fndecl == NULL_TREE)
11754 return true;
11756 /* Any call to an external function is interesting. */
11757 if (DECL_EXTERNAL (fndecl))
11758 return true;
11760 /* Interesting functions that we are emitting in this object file. */
11761 c_node = cgraph_node::get (fndecl);
11762 c_node = c_node->ultimate_alias_target ();
11763 return !c_node->only_called_directly_p ();
11765 return false;
11767 #endif
11769 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11770 for a call to a function whose data type is FNTYPE.
11771 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11773 For incoming args we set the number of arguments in the prototype large
11774 so we never return a PARALLEL. */
11776 void
11777 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11778 rtx libname ATTRIBUTE_UNUSED, int incoming,
11779 int libcall, int n_named_args,
11780 tree fndecl ATTRIBUTE_UNUSED,
11781 machine_mode return_mode ATTRIBUTE_UNUSED)
11783 static CUMULATIVE_ARGS zero_cumulative;
11785 *cum = zero_cumulative;
11786 cum->words = 0;
11787 cum->fregno = FP_ARG_MIN_REG;
11788 cum->vregno = ALTIVEC_ARG_MIN_REG;
11789 cum->prototype = (fntype && prototype_p (fntype));
11790 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11791 ? CALL_LIBCALL : CALL_NORMAL);
11792 cum->sysv_gregno = GP_ARG_MIN_REG;
11793 cum->stdarg = stdarg_p (fntype);
11794 cum->libcall = libcall;
11796 cum->nargs_prototype = 0;
11797 if (incoming || cum->prototype)
11798 cum->nargs_prototype = n_named_args;
11800 /* Check for a longcall attribute. */
11801 if ((!fntype && rs6000_default_long_calls)
11802 || (fntype
11803 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11804 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11805 cum->call_cookie |= CALL_LONG;
11807 if (TARGET_DEBUG_ARG)
11809 fprintf (stderr, "\ninit_cumulative_args:");
11810 if (fntype)
11812 tree ret_type = TREE_TYPE (fntype);
11813 fprintf (stderr, " ret code = %s,",
11814 get_tree_code_name (TREE_CODE (ret_type)));
11817 if (cum->call_cookie & CALL_LONG)
11818 fprintf (stderr, " longcall,");
11820 fprintf (stderr, " proto = %d, nargs = %d\n",
11821 cum->prototype, cum->nargs_prototype);
11824 #ifdef HAVE_AS_GNU_ATTRIBUTE
11825 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11827 cum->escapes = call_ABI_of_interest (fndecl);
11828 if (cum->escapes)
11830 tree return_type;
11832 if (fntype)
11834 return_type = TREE_TYPE (fntype);
11835 return_mode = TYPE_MODE (return_type);
11837 else
11838 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11840 if (return_type != NULL)
11842 if (TREE_CODE (return_type) == RECORD_TYPE
11843 && TYPE_TRANSPARENT_AGGR (return_type))
11845 return_type = TREE_TYPE (first_field (return_type));
11846 return_mode = TYPE_MODE (return_type);
11848 if (AGGREGATE_TYPE_P (return_type)
11849 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11850 <= 8))
11851 rs6000_returns_struct = true;
11853 if (SCALAR_FLOAT_MODE_P (return_mode))
11855 rs6000_passes_float = true;
11856 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11857 && (FLOAT128_IBM_P (return_mode)
11858 || FLOAT128_IEEE_P (return_mode)
11859 || (return_type != NULL
11860 && (TYPE_MAIN_VARIANT (return_type)
11861 == long_double_type_node))))
11862 rs6000_passes_long_double = true;
11864 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11865 || SPE_VECTOR_MODE (return_mode))
11866 rs6000_passes_vector = true;
11869 #endif
11871 if (fntype
11872 && !TARGET_ALTIVEC
11873 && TARGET_ALTIVEC_ABI
11874 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11876 error ("cannot return value in vector register because"
11877 " altivec instructions are disabled, use -maltivec"
11878 " to enable them");
11882 /* The mode the ABI uses for a word. This is not the same as word_mode
11883 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11885 static machine_mode
11886 rs6000_abi_word_mode (void)
11888 return TARGET_32BIT ? SImode : DImode;
11891 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11892 static char *
11893 rs6000_offload_options (void)
11895 if (TARGET_64BIT)
11896 return xstrdup ("-foffload-abi=lp64");
11897 else
11898 return xstrdup ("-foffload-abi=ilp32");
11901 /* On rs6000, function arguments are promoted, as are function return
11902 values. */
11904 static machine_mode
11905 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11906 machine_mode mode,
11907 int *punsignedp ATTRIBUTE_UNUSED,
11908 const_tree, int)
11910 PROMOTE_MODE (mode, *punsignedp, type);
11912 return mode;
11915 /* Return true if TYPE must be passed on the stack and not in registers. */
11917 static bool
11918 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11920 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11921 return must_pass_in_stack_var_size (mode, type);
11922 else
11923 return must_pass_in_stack_var_size_or_pad (mode, type);
11926 static inline bool
11927 is_complex_IBM_long_double (machine_mode mode)
11929 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11932 /* Whether ABI_V4 passes MODE args to a function in floating point
11933 registers. */
11935 static bool
11936 abi_v4_pass_in_fpr (machine_mode mode)
11938 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
11939 return false;
11940 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11941 return true;
11942 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11943 return true;
11944 /* ABI_V4 passes complex IBM long double in 8 gprs.
11945 Stupid, but we can't change the ABI now. */
11946 if (is_complex_IBM_long_double (mode))
11947 return false;
11948 if (FLOAT128_2REG_P (mode))
11949 return true;
11950 if (DECIMAL_FLOAT_MODE_P (mode))
11951 return true;
11952 return false;
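/* Annotation, mode by mode (V.4 with hard float):

     SFmode                    -> FPR, if TARGET_SINGLE_FLOAT
     DFmode                    -> FPR, if TARGET_DOUBLE_FLOAT
     SDmode/DDmode/TDmode      -> FPR (decimal float)
     IBM long double           -> FPR pair, via FLOAT128_2REG_P
     complex IBM long double   -> GPRs, per the comment above  */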
11955 /* If defined, a C expression which determines whether, and in which
11956 direction, to pad out an argument with extra space. The value
11957 should be of type `enum direction': either `upward' to pad above
11958 the argument, `downward' to pad below, or `none' to inhibit
11959 padding.
11961 For the AIX ABI, structs are always stored left-shifted in their
11962 argument slot. */
11964 enum direction
11965 function_arg_padding (machine_mode mode, const_tree type)
11967 #ifndef AGGREGATE_PADDING_FIXED
11968 #define AGGREGATE_PADDING_FIXED 0
11969 #endif
11970 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11971 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11972 #endif
11974 if (!AGGREGATE_PADDING_FIXED)
11976 /* GCC used to pass structures of the same size as integer types as
11977 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11978 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11979 passed padded downward, except that -mstrict-align further
11980 muddied the water in that multi-component structures of 2 and 4
11981 bytes in size were passed padded upward.
11983 The following arranges for best compatibility with previous
11984 versions of gcc, but removes the -mstrict-align dependency. */
11985 if (BYTES_BIG_ENDIAN)
11987 HOST_WIDE_INT size = 0;
11989 if (mode == BLKmode)
11991 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11992 size = int_size_in_bytes (type);
11994 else
11995 size = GET_MODE_SIZE (mode);
11997 if (size == 1 || size == 2 || size == 4)
11998 return downward;
12000 return upward;
12003 if (AGGREGATES_PAD_UPWARD_ALWAYS)
12005 if (type != 0 && AGGREGATE_TYPE_P (type))
12006 return upward;
12009 /* Fall back to the default. */
12010 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
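/* Annotation, big-endian examples of the size test above:

     char, short, 4-byte struct (size 1, 2 or 4) -> padded downward
     struct { char c[3]; }      (size 3)         -> padded upward
     little-endian targets                       -> default padding  */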
12013 /* If defined, a C expression that gives the alignment boundary, in bits,
12014 of an argument with the specified mode and type. If it is not defined,
12015 PARM_BOUNDARY is used for all arguments.
12017 V.4 wants long longs and doubles to be double word aligned. Just
12018 testing the mode size is a boneheaded way to do this as it means
12019 that other types such as complex int are also double word aligned.
12020 However, we're stuck with this because changing the ABI might break
12021 existing library interfaces.
12023 Doubleword align SPE vectors.
12024 Quadword align Altivec/VSX vectors.
12025 Quadword align large synthetic vector types. */
12027 static unsigned int
12028 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
12030 machine_mode elt_mode;
12031 int n_elts;
12033 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12035 if (DEFAULT_ABI == ABI_V4
12036 && (GET_MODE_SIZE (mode) == 8
12037 || (TARGET_HARD_FLOAT
12038 && TARGET_FPRS
12039 && !is_complex_IBM_long_double (mode)
12040 && FLOAT128_2REG_P (mode))))
12041 return 64;
12042 else if (FLOAT128_VECTOR_P (mode))
12043 return 128;
12044 else if (SPE_VECTOR_MODE (mode)
12045 || (type && TREE_CODE (type) == VECTOR_TYPE
12046 && int_size_in_bytes (type) >= 8
12047 && int_size_in_bytes (type) < 16))
12048 return 64;
12049 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
12050 || (type && TREE_CODE (type) == VECTOR_TYPE
12051 && int_size_in_bytes (type) >= 16))
12052 return 128;
12054 /* Aggregate types that need > 8 byte alignment are quadword-aligned
12055 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
12056 -mcompat-align-parm is used. */
12057 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
12058 || DEFAULT_ABI == ABI_ELFv2)
12059 && type && TYPE_ALIGN (type) > 64)
12061 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
12062 or homogeneous float/vector aggregates here. We already handled
12063 vector aggregates above, but still need to check for float here. */
12064 bool aggregate_p = (AGGREGATE_TYPE_P (type)
12065 && !SCALAR_FLOAT_MODE_P (elt_mode));
12067 /* We used to check for BLKmode instead of the above aggregate type
12068 check. Warn when this results in any difference to the ABI. */
12069 if (aggregate_p != (mode == BLKmode))
12071 static bool warned;
12072 if (!warned && warn_psabi)
12074 warned = true;
12075 inform (input_location,
12076 "the ABI of passing aggregates with %d-byte alignment"
12077 " has changed in GCC 5",
12078 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
12082 if (aggregate_p)
12083 return 128;
12086 /* Similar for the Darwin64 ABI. Note that for historical reasons we
12087 implement the "aggregate type" check as a BLKmode check here; this
12088 means certain aggregate types are in fact not aligned. */
12089 if (TARGET_MACHO && rs6000_darwin64_abi
12090 && mode == BLKmode
12091 && type && TYPE_ALIGN (type) > 64)
12092 return 128;
12094 return PARM_BOUNDARY;
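/* Annotation, resulting boundaries in bits:

     V.4 long long / double                  -> 64
     Altivec/VSX vector, 128-bit IEEE float  -> 128
     SPE vector (8 <= size < 16 bytes)       -> 64
     ELFv2 aggregate aligned > 64 bits       -> 128
     anything else                           -> PARM_BOUNDARY  */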
12097 /* The offset in words to the start of the parameter save area. */
12099 static unsigned int
12100 rs6000_parm_offset (void)
12102 return (DEFAULT_ABI == ABI_V4 ? 2
12103 : DEFAULT_ABI == ABI_ELFv2 ? 4
12104 : 6);
12107 /* For a function parm of MODE and TYPE, return the starting word in
12108 the parameter area. NWORDS of the parameter area are already used. */
12110 static unsigned int
12111 rs6000_parm_start (machine_mode mode, const_tree type,
12112 unsigned int nwords)
12114 unsigned int align;
12116 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
12117 return nwords + (-(rs6000_parm_offset () + nwords) & align);
12120 /* Compute the size (in words) of a function argument. */
12122 static unsigned long
12123 rs6000_arg_size (machine_mode mode, const_tree type)
12125 unsigned long size;
12127 if (mode != BLKmode)
12128 size = GET_MODE_SIZE (mode);
12129 else
12130 size = int_size_in_bytes (type);
12132 if (TARGET_32BIT)
12133 return (size + 3) >> 2;
12134 else
12135 return (size + 7) >> 3;
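/* Worked example (annotation): a 12-byte struct occupies
   (12 + 3) >> 2 = 3 parameter words with -m32, but
   (12 + 7) >> 3 = 2 words with -m64.  */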
12138 /* Use this to flush pending int fields. */
12140 static void
12141 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
12142 HOST_WIDE_INT bitpos, int final)
12144 unsigned int startbit, endbit;
12145 int intregs, intoffset;
12146 machine_mode mode;
12148 /* Handle the situations where a float is taking up the first half
12149 of the GPR, and the other half is empty (typically due to
12150 alignment restrictions). We can detect this by an 8-byte-aligned
12151 int field, or by seeing that this is the final flush for this
12152 argument. Count the word and continue on. */
12153 if (cum->floats_in_gpr == 1
12154 && (cum->intoffset % 64 == 0
12155 || (cum->intoffset == -1 && final)))
12157 cum->words++;
12158 cum->floats_in_gpr = 0;
12161 if (cum->intoffset == -1)
12162 return;
12164 intoffset = cum->intoffset;
12165 cum->intoffset = -1;
12166 cum->floats_in_gpr = 0;
12168 if (intoffset % BITS_PER_WORD != 0)
12170 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12171 MODE_INT, 0);
12172 if (mode == BLKmode)
12174 /* We couldn't find an appropriate mode, which happens,
12175 e.g., in packed structs when there are 3 bytes to load.
12176 Back intoffset back to the beginning of the word in this
12177 case. */
12178 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12182 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12183 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12184 intregs = (endbit - startbit) / BITS_PER_WORD;
12185 cum->words += intregs;
12186 /* words should be unsigned. */
12187 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
12189 int pad = (endbit/BITS_PER_WORD) - cum->words;
12190 cum->words += pad;
12194 /* The darwin64 ABI calls for us to recurse down through structs,
12195 looking for elements passed in registers. Unfortunately, we have
12196 to track int register count here also because of misalignments
12197 in powerpc alignment mode. */
12199 static void
12200 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
12201 const_tree type,
12202 HOST_WIDE_INT startbitpos)
12204 tree f;
12206 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12207 if (TREE_CODE (f) == FIELD_DECL)
12209 HOST_WIDE_INT bitpos = startbitpos;
12210 tree ftype = TREE_TYPE (f);
12211 machine_mode mode;
12212 if (ftype == error_mark_node)
12213 continue;
12214 mode = TYPE_MODE (ftype);
12216 if (DECL_SIZE (f) != 0
12217 && tree_fits_uhwi_p (bit_position (f)))
12218 bitpos += int_bit_position (f);
12220 /* ??? FIXME: else assume zero offset. */
12222 if (TREE_CODE (ftype) == RECORD_TYPE)
12223 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
12224 else if (USE_FP_FOR_ARG_P (cum, mode))
12226 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
12227 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
12228 cum->fregno += n_fpregs;
12229 /* Single-precision floats present a special problem for
12230 us, because they are smaller than an 8-byte GPR, and so
12231 the structure-packing rules combined with the standard
12232 varargs behavior mean that we want to pack float/float
12233 and float/int combinations into a single register's
12234 space. This is complicated by the arg advance flushing,
12235 which works on arbitrarily large groups of int-type
12236 fields. */
12237 if (mode == SFmode)
12239 if (cum->floats_in_gpr == 1)
12241 /* Two floats in a word; count the word and reset
12242 the float count. */
12243 cum->words++;
12244 cum->floats_in_gpr = 0;
12246 else if (bitpos % 64 == 0)
12248 /* A float at the beginning of an 8-byte word;
12249 count it and put off adjusting cum->words until
12250 we see if an arg advance flush is going to do it
12251 for us. */
12252 cum->floats_in_gpr++;
12254 else
12256 /* The float is at the end of a word, preceded
12257 by integer fields, so the arg advance flush
12258 just above has already set cum->words and
12259 everything is taken care of. */
12262 else
12263 cum->words += n_fpregs;
12265 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12267 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
12268 cum->vregno++;
12269 cum->words += 2;
12271 else if (cum->intoffset == -1)
12272 cum->intoffset = bitpos;
12276 /* Check for an item that needs to be considered specially under the darwin 64
12277 bit ABI. These are record types where the mode is BLK or the structure is
12278 8 bytes in size. */
12279 static int
12280 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
12282 return rs6000_darwin64_abi
12283 && ((mode == BLKmode
12284 && TREE_CODE (type) == RECORD_TYPE
12285 && int_size_in_bytes (type) > 0)
12286 || (type && TREE_CODE (type) == RECORD_TYPE
12287 && int_size_in_bytes (type) == 8)) ? 1 : 0;
12290 /* Update the data in CUM to advance over an argument
12291 of mode MODE and data type TYPE.
12292 (TYPE is null for libcalls where that information may not be available.)
12294 Note that for args passed by reference, function_arg will be called
12295 with MODE and TYPE set to that of the pointer to the arg, not the arg
12296 itself. */
12298 static void
12299 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
12300 const_tree type, bool named, int depth)
12302 machine_mode elt_mode;
12303 int n_elts;
12305 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12307 /* Only tick off an argument if we're not recursing. */
12308 if (depth == 0)
12309 cum->nargs_prototype--;
12311 #ifdef HAVE_AS_GNU_ATTRIBUTE
12312 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
12313 && cum->escapes)
12315 if (SCALAR_FLOAT_MODE_P (mode))
12317 rs6000_passes_float = true;
12318 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
12319 && (FLOAT128_IBM_P (mode)
12320 || FLOAT128_IEEE_P (mode)
12321 || (type != NULL
12322 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
12323 rs6000_passes_long_double = true;
12325 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
12326 || (SPE_VECTOR_MODE (mode)
12327 && !cum->stdarg
12328 && cum->sysv_gregno <= GP_ARG_MAX_REG))
12329 rs6000_passes_vector = true;
12331 #endif
12333 if (TARGET_ALTIVEC_ABI
12334 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
12335 || (type && TREE_CODE (type) == VECTOR_TYPE
12336 && int_size_in_bytes (type) == 16)))
12338 bool stack = false;
12340 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12342 cum->vregno += n_elts;
12344 if (!TARGET_ALTIVEC)
12345 error ("cannot pass argument in vector register because"
12346 " altivec instructions are disabled, use -maltivec"
12347 " to enable them");
12349 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
12350 even if it is going to be passed in a vector register.
12351 Darwin does the same for variable-argument functions. */
12352 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12353 && TARGET_64BIT)
12354 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
12355 stack = true;
12357 else
12358 stack = true;
12360 if (stack)
12362 int align;
12364 /* Vector parameters must be 16-byte aligned. In 32-bit
12365 mode this means we need to take into account the offset
12366 to the parameter save area. In 64-bit mode, they just
12367 have to start on an even word, since the parameter save
12368 area is 16-byte aligned. */
12369 if (TARGET_32BIT)
12370 align = -(rs6000_parm_offset () + cum->words) & 3;
12371 else
12372 align = cum->words & 1;
12373 cum->words += align + rs6000_arg_size (mode, type);
12375 if (TARGET_DEBUG_ARG)
12377 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
12378 cum->words, align);
12379 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
12380 cum->nargs_prototype, cum->prototype,
12381 GET_MODE_NAME (mode));
12385 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
12386 && !cum->stdarg
12387 && cum->sysv_gregno <= GP_ARG_MAX_REG)
12388 cum->sysv_gregno++;
12390 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12392 int size = int_size_in_bytes (type);
12393 /* Variable sized types have size == -1 and are
12394 treated as if consisting entirely of ints.
12395 Pad to a 16-byte boundary if needed. */
12396 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12397 && (cum->words % 2) != 0)
12398 cum->words++;
12399 /* For varargs, we can just go up by the size of the struct. */
12400 if (!named)
12401 cum->words += (size + 7) / 8;
12402 else
12404 /* It is tempting to say int register count just goes up by
12405 sizeof(type)/8, but this is wrong in a case such as
12406 { int; double; int; } [powerpc alignment]. We have to
12407 grovel through the fields for these too. */
12408 cum->intoffset = 0;
12409 cum->floats_in_gpr = 0;
12410 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
12411 rs6000_darwin64_record_arg_advance_flush (cum,
12412 size * BITS_PER_UNIT, 1);
12414 if (TARGET_DEBUG_ARG)
12416 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
12417 cum->words, TYPE_ALIGN (type), size);
12418 fprintf (stderr,
12419 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
12420 cum->nargs_prototype, cum->prototype,
12421 GET_MODE_NAME (mode));
12424 else if (DEFAULT_ABI == ABI_V4)
12426 if (abi_v4_pass_in_fpr (mode))
12428 /* _Decimal128 must use an even/odd register pair. This assumes
12429 that the register number is odd when fregno is odd. */
12430 if (mode == TDmode && (cum->fregno % 2) == 1)
12431 cum->fregno++;
12433 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12434 <= FP_ARG_V4_MAX_REG)
12435 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
12436 else
12438 cum->fregno = FP_ARG_V4_MAX_REG + 1;
12439 if (mode == DFmode || FLOAT128_IBM_P (mode)
12440 || mode == DDmode || mode == TDmode)
12441 cum->words += cum->words & 1;
12442 cum->words += rs6000_arg_size (mode, type);
12445 else
12447 int n_words = rs6000_arg_size (mode, type);
12448 int gregno = cum->sysv_gregno;
12450 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
12451 (r7,r8) or (r9,r10), as is any other 2-word item such
12452 as complex int, due to a historical mistake. */
12453 if (n_words == 2)
12454 gregno += (1 - gregno) & 1;
12456 /* Multi-reg args are not split between registers and stack. */
12457 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12459 /* Long long and SPE vectors are aligned on the stack.
12460 So are other 2 word items such as complex int due to
12461 a historical mistake. */
12462 if (n_words == 2)
12463 cum->words += cum->words & 1;
12464 cum->words += n_words;
12467 /* Note: we keep accumulating gregno even after we have started
12468 spilling to the stack, so that expand_builtin_saveregs can see
12469 that spilling has begun. */
12470 cum->sysv_gregno = gregno + n_words;
12473 if (TARGET_DEBUG_ARG)
12475 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12476 cum->words, cum->fregno);
12477 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
12478 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
12479 fprintf (stderr, "mode = %4s, named = %d\n",
12480 GET_MODE_NAME (mode), named);
12483 else
12485 int n_words = rs6000_arg_size (mode, type);
12486 int start_words = cum->words;
12487 int align_words = rs6000_parm_start (mode, type, start_words);
12489 cum->words = align_words + n_words;
12491 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
12493 /* _Decimal128 must be passed in an even/odd float register pair.
12494 This assumes that the register number is odd when fregno is
12495 odd. */
12496 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12497 cum->fregno++;
12498 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12501 if (TARGET_DEBUG_ARG)
12503 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12504 cum->words, cum->fregno);
12505 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12506 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12507 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12508 named, align_words - start_words, depth);
12513 static void
12514 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12515 const_tree type, bool named)
12517 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12521 static rtx
12522 spe_build_register_parallel (machine_mode mode, int gregno)
12524 rtx r1, r3, r5, r7;
12526 switch (mode)
12528 case DFmode:
12529 r1 = gen_rtx_REG (DImode, gregno);
12530 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12531 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
12533 case DCmode:
12534 case TFmode:
12535 r1 = gen_rtx_REG (DImode, gregno);
12536 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12537 r3 = gen_rtx_REG (DImode, gregno + 2);
12538 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12539 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
12541 case TCmode:
12542 r1 = gen_rtx_REG (DImode, gregno);
12543 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12544 r3 = gen_rtx_REG (DImode, gregno + 2);
12545 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12546 r5 = gen_rtx_REG (DImode, gregno + 4);
12547 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
12548 r7 = gen_rtx_REG (DImode, gregno + 6);
12549 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
12550 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
12552 default:
12553 gcc_unreachable ();
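/* Annotation: for TFmode starting at r5, the PARALLEL built above is

     (parallel:TF [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   Each 64-bit half lives in one SPE-extended GPR but consumes a pair of
   32-bit argument slots, hence the stride of two in gregno.  */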
12557 /* Determine where to put a SIMD argument on the SPE. */
12558 static rtx
12559 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
12560 const_tree type)
12562 int gregno = cum->sysv_gregno;
12564 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
12565 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
12566 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
12567 || mode == DCmode || mode == TCmode))
12569 int n_words = rs6000_arg_size (mode, type);
12571 /* Doubles go in an odd/even register pair (r5/r6, etc). */
12572 if (mode == DFmode)
12573 gregno += (1 - gregno) & 1;
12575 /* Multi-reg args are not split between registers and stack. */
12576 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12577 return NULL_RTX;
12579 return spe_build_register_parallel (mode, gregno);
12581 if (cum->stdarg)
12583 int n_words = rs6000_arg_size (mode, type);
12585 /* SPE vectors are put in odd registers. */
12586 if (n_words == 2 && (gregno & 1) == 0)
12587 gregno += 1;
12589 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
12591 rtx r1, r2;
12592 machine_mode m = SImode;
12594 r1 = gen_rtx_REG (m, gregno);
12595 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
12596 r2 = gen_rtx_REG (m, gregno + 1);
12597 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
12598 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
12600 else
12601 return NULL_RTX;
12603 else
12605 if (gregno <= GP_ARG_MAX_REG)
12606 return gen_rtx_REG (mode, gregno);
12607 else
12608 return NULL_RTX;
12612 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12613 structure between cum->intoffset and bitpos to integer registers. */
12615 static void
12616 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12617 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12619 machine_mode mode;
12620 unsigned int regno;
12621 unsigned int startbit, endbit;
12622 int this_regno, intregs, intoffset;
12623 rtx reg;
12625 if (cum->intoffset == -1)
12626 return;
12628 intoffset = cum->intoffset;
12629 cum->intoffset = -1;
12631 /* If this is the trailing part of a word, try to only load that
12632 much into the register. Otherwise load the whole register. Note
12633 that in the latter case we may pick up unwanted bits. It's not a
12634 problem at the moment, but we may wish to revisit this. */
12636 if (intoffset % BITS_PER_WORD != 0)
12638 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12639 MODE_INT, 0);
12640 if (mode == BLKmode)
12642 /* We couldn't find an appropriate mode, which happens,
12643 e.g., in packed structs when there are 3 bytes to load.
12644 Back intoffset back to the beginning of the word in this
12645 case. */
12646 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12647 mode = word_mode;
12650 else
12651 mode = word_mode;
12653 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12654 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12655 intregs = (endbit - startbit) / BITS_PER_WORD;
12656 this_regno = cum->words + intoffset / BITS_PER_WORD;
12658 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12659 cum->use_stack = 1;
12661 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12662 if (intregs <= 0)
12663 return;
12665 intoffset /= BITS_PER_UNIT;
12668 regno = GP_ARG_MIN_REG + this_regno;
12669 reg = gen_rtx_REG (mode, regno);
12670 rvec[(*k)++] =
12671 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12673 this_regno += 1;
12674 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12675 mode = word_mode;
12676 intregs -= 1;
12678 while (intregs > 0);
12681 /* Recursive workhorse for the following. */
12683 static void
12684 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12685 HOST_WIDE_INT startbitpos, rtx rvec[],
12686 int *k)
12688 tree f;
12690 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12691 if (TREE_CODE (f) == FIELD_DECL)
12693 HOST_WIDE_INT bitpos = startbitpos;
12694 tree ftype = TREE_TYPE (f);
12695 machine_mode mode;
12696 if (ftype == error_mark_node)
12697 continue;
12698 mode = TYPE_MODE (ftype);
12700 if (DECL_SIZE (f) != 0
12701 && tree_fits_uhwi_p (bit_position (f)))
12702 bitpos += int_bit_position (f);
12704 /* ??? FIXME: else assume zero offset. */
12706 if (TREE_CODE (ftype) == RECORD_TYPE)
12707 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12708 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12710 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12711 #if 0
12712 switch (mode)
12714 case SCmode: mode = SFmode; break;
12715 case DCmode: mode = DFmode; break;
12716 case TCmode: mode = TFmode; break;
12717 default: break;
12719 #endif
12720 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12721 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12723 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12724 && (mode == TFmode || mode == TDmode));
12725 /* Long double or _Decimal128 split over regs and memory. */
12726 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12727 cum->use_stack = 1;
12729 rvec[(*k)++]
12730 = gen_rtx_EXPR_LIST (VOIDmode,
12731 gen_rtx_REG (mode, cum->fregno++),
12732 GEN_INT (bitpos / BITS_PER_UNIT));
12733 if (FLOAT128_2REG_P (mode))
12734 cum->fregno++;
12736 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12738 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12739 rvec[(*k)++]
12740 = gen_rtx_EXPR_LIST (VOIDmode,
12741 gen_rtx_REG (mode, cum->vregno++),
12742 GEN_INT (bitpos / BITS_PER_UNIT));
12744 else if (cum->intoffset == -1)
12745 cum->intoffset = bitpos;
12749 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12750 the register(s) to be used for each field and subfield of a struct
12751 being passed by value, along with the offset of where the
12752 register's value may be found in the block. FP fields go in FP
12753 registers, vector fields go in vector registers, and everything
12754 else goes in int registers, packed as in memory.
12756 This code is also used for function return values. RETVAL indicates
12757 whether this is the case.
12759 Much of this is taken from the SPARC V9 port, which has a similar
12760 calling convention. */
12762 static rtx
12763 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12764 bool named, bool retval)
12766 rtx rvec[FIRST_PSEUDO_REGISTER];
12767 int k = 1, kbase = 1;
12768 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12769 /* This is a copy; modifications are not visible to our caller. */
12770 CUMULATIVE_ARGS copy_cum = *orig_cum;
12771 CUMULATIVE_ARGS *cum = &copy_cum;
12773 /* Pad to 16 byte boundary if needed. */
12774 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12775 && (cum->words % 2) != 0)
12776 cum->words++;
12778 cum->intoffset = 0;
12779 cum->use_stack = 0;
12780 cum->named = named;
12782 /* Put entries into rvec[] for individual FP and vector fields, and
12783 for the chunks of memory that go in int regs. Note we start at
12784 element 1; 0 is reserved for an indication of using memory, and
12785 may or may not be filled in below. */
12786 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12787 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12789 /* If any part of the struct went on the stack put all of it there.
12790 This hack is because the generic code for
12791 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12792 parts of the struct are not at the beginning. */
12793 if (cum->use_stack)
12795 if (retval)
12796 return NULL_RTX; /* doesn't go in registers at all */
12797 kbase = 0;
12798 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12800 if (k > 1 || cum->use_stack)
12801 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12802 else
12803 return NULL_RTX;
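/* Editorial illustration, not part of the original source: for a
   hypothetical darwin64 by-value argument such as

       struct { double d; int i; };

   the PARALLEL built above would look roughly like

       (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                      (expr_list (reg:DI r4) (const_int 8))])

   i.e. the FP field travels in an FPR while the trailing int is
   flushed into a word-sized GPR chunk by
   rs6000_darwin64_record_arg_flush, each element tagged with its byte
   offset within the block.  Register numbers are illustrative only.  */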
12806 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12808 static rtx
12809 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12810 int align_words)
12812 int n_units;
12813 int i, k;
12814 rtx rvec[GP_ARG_NUM_REG + 1];
12816 if (align_words >= GP_ARG_NUM_REG)
12817 return NULL_RTX;
12819 n_units = rs6000_arg_size (mode, type);
12821 /* Optimize the simple case where the arg fits in one gpr, except in
12822 the case of BLKmode due to assign_parms assuming that registers are
12823 BITS_PER_WORD wide. */
12824 if (n_units == 0
12825 || (n_units == 1 && mode != BLKmode))
12826 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12828 k = 0;
12829 if (align_words + n_units > GP_ARG_NUM_REG)
12830 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12831 using a magic NULL_RTX component.
12832 This is not strictly correct. Only some of the arg belongs in
12833 memory, not all of it. However, the normal scheme using
12834 function_arg_partial_nregs can result in unusual subregs, eg.
12835 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12836 store the whole arg to memory is often more efficient than code
12837 to store pieces, and we know that space is available in the right
12838 place for the whole arg. */
12839 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12841 i = 0;
12842 do
12843 {
12844 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12845 rtx off = GEN_INT (i++ * 4);
12846 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12847 }
12848 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12850 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
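/* Editorial illustration, not part of the original source: with
   -m32 -mpowerpc64, an 8-byte DFmode argument whose first half lands
   in the last GPR (align_words == 7) yields roughly

       (parallel:DF [(expr_list (nil)        (const_int 0))
                     (expr_list (reg:SI r10) (const_int 0))])

   where the leading (nil) element is the "magic NULL_RTX" memory
   marker described above, and the SImode piece avoids the problematic
   (subreg:SI (reg:DF) 4) form.  */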
12853 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12854 but must also be copied into the parameter save area starting at
12855 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12856 to the GPRs and/or memory. Return the number of elements used. */
12858 static int
12859 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12860 int align_words, rtx *rvec)
12862 int k = 0;
12864 if (align_words < GP_ARG_NUM_REG)
12866 int n_words = rs6000_arg_size (mode, type);
12868 if (align_words + n_words > GP_ARG_NUM_REG
12869 || mode == BLKmode
12870 || (TARGET_32BIT && TARGET_POWERPC64))
12872 /* If this is partially on the stack, then we only
12873 include the portion actually in registers here. */
12874 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12875 int i = 0;
12877 if (align_words + n_words > GP_ARG_NUM_REG)
12879 /* Not all of the arg fits in gprs. Say that it goes in memory
12880 too, using a magic NULL_RTX component. Also see comment in
12881 rs6000_mixed_function_arg for why the normal
12882 function_arg_partial_nregs scheme doesn't work in this case. */
12883 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12886 do
12887 {
12888 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12889 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12890 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12891 }
12892 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12894 else
12896 /* The whole arg fits in gprs. */
12897 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12898 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12901 else
12903 /* It's entirely in memory. */
12904 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12907 return k;
12910 /* RVEC is a vector of K components of an argument of mode MODE.
12911 Construct the final function_arg return value from it. */
12913 static rtx
12914 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12916 gcc_assert (k >= 1);
12918 /* Avoid returning a PARALLEL in the trivial cases. */
12919 if (k == 1)
12921 if (XEXP (rvec[0], 0) == NULL_RTX)
12922 return NULL_RTX;
12924 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12925 return XEXP (rvec[0], 0);
12928 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12931 /* Determine where to put an argument to a function.
12932 Value is zero to push the argument on the stack,
12933 or a hard register in which to store the argument.
12935 MODE is the argument's machine mode.
12936 TYPE is the data type of the argument (as a tree).
12937 This is null for libcalls where that information may
12938 not be available.
12939 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12940 the preceding args and about the function being called. It is
12941 not modified in this routine.
12942 NAMED is nonzero if this argument is a named parameter
12943 (otherwise it is an extra parameter matching an ellipsis).
12945 On RS/6000 the first eight words of non-FP arguments are normally in
12946 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12947 Under V.4, the first 8 FP args are in registers.
12949 If this is floating-point and no prototype is specified, we use
12950 both an FP and integer register (or possibly FP reg and stack). Library
12951 functions (when CALL_LIBCALL is set) always have the proper types for args,
12952 so we can pass the FP value just in one register. emit_library_function
12953 doesn't support PARALLEL anyway.
12955 Note that for args passed by reference, function_arg will be called
12956 with MODE and TYPE set to that of the pointer to the arg, not the arg
12957 itself. */
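/* Editorial summary of the return shapes produced below:

       NULL_RTX         the argument goes wholly on the stack;
       (reg:MODE n)     it goes wholly in one register;
       (parallel ...)   it is split across register classes and/or
                        memory; a first element whose register is
                        NULL_RTX marks a stack portion (this is the
                        marker tested by rs6000_parm_needs_stack).  */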
12959 static rtx
12960 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12961 const_tree type, bool named)
12963 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12964 enum rs6000_abi abi = DEFAULT_ABI;
12965 machine_mode elt_mode;
12966 int n_elts;
12968 /* Return a marker to indicate whether CR1 needs to set or clear the
12969 bit that V.4 uses to say fp args were passed in registers.
12970 Assume that we don't need the marker for software floating point,
12971 or compiler generated library calls. */
12972 if (mode == VOIDmode)
12974 if (abi == ABI_V4
12975 && (cum->call_cookie & CALL_LIBCALL) == 0
12976 && (cum->stdarg
12977 || (cum->nargs_prototype < 0
12978 && (cum->prototype || TARGET_NO_PROTOTYPE))))
12980 /* For the SPE, we need to crxor CR6 always. */
12981 if (TARGET_SPE_ABI)
12982 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
12983 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
12984 return GEN_INT (cum->call_cookie
12985 | ((cum->fregno == FP_ARG_MIN_REG)
12986 ? CALL_V4_SET_FP_ARGS
12987 : CALL_V4_CLEAR_FP_ARGS));
12990 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12993 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12995 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12997 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12998 if (rslt != NULL_RTX)
12999 return rslt;
13000 /* Else fall through to usual handling. */
13003 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
13005 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
13006 rtx r, off;
13007 int i, k = 0;
13009 /* Do we also need to pass this argument in the parameter save area?
13010 Library support functions for IEEE 128-bit are assumed to not need the
13011 value passed both in GPRs and in vector registers. */
13012 if (TARGET_64BIT && !cum->prototype
13013 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
13015 int align_words = ROUND_UP (cum->words, 2);
13016 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
13019 /* Describe where this argument goes in the vector registers. */
13020 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
13022 r = gen_rtx_REG (elt_mode, cum->vregno + i);
13023 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
13024 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13027 return rs6000_finish_function_arg (mode, rvec, k);
13029 else if (TARGET_ALTIVEC_ABI
13030 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
13031 || (type && TREE_CODE (type) == VECTOR_TYPE
13032 && int_size_in_bytes (type) == 16)))
13034 if (named || abi == ABI_V4)
13035 return NULL_RTX;
13036 else
13038 /* Vector parameters to varargs functions under AIX or Darwin
13039 get passed in memory and possibly also in GPRs. */
13040 int align, align_words, n_words;
13041 machine_mode part_mode;
13043 /* Vector parameters must be 16-byte aligned. In 32-bit
13044 mode this means we need to take into account the offset
13045 to the parameter save area. In 64-bit mode, they just
13046 have to start on an even word, since the parameter save
13047 area is 16-byte aligned. */
13048 if (TARGET_32BIT)
13049 align = -(rs6000_parm_offset () + cum->words) & 3;
13050 else
13051 align = cum->words & 1;
13052 align_words = cum->words + align;
13054 /* Out of registers? Memory, then. */
13055 if (align_words >= GP_ARG_NUM_REG)
13056 return NULL_RTX;
13058 if (TARGET_32BIT && TARGET_POWERPC64)
13059 return rs6000_mixed_function_arg (mode, type, align_words);
13061 /* The vector value goes in GPRs. Only the part of the
13062 value in GPRs is reported here. */
13063 part_mode = mode;
13064 n_words = rs6000_arg_size (mode, type);
13065 if (align_words + n_words > GP_ARG_NUM_REG)
13066 /* Fortunately, there are only two possibilities: the value
13067 is either wholly in GPRs or half in GPRs and half not. */
13068 part_mode = DImode;
13070 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
13073 else if (TARGET_SPE_ABI && TARGET_SPE
13074 && (SPE_VECTOR_MODE (mode)
13075 || (TARGET_E500_DOUBLE && (mode == DFmode
13076 || mode == DCmode
13077 || mode == TFmode
13078 || mode == TCmode))))
13079 return rs6000_spe_function_arg (cum, mode, type);
13081 else if (abi == ABI_V4)
13083 if (abi_v4_pass_in_fpr (mode))
13085 /* _Decimal128 must use an even/odd register pair. This assumes
13086 that the register number is odd when fregno is odd. */
13087 if (mode == TDmode && (cum->fregno % 2) == 1)
13088 cum->fregno++;
13090 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
13091 <= FP_ARG_V4_MAX_REG)
13092 return gen_rtx_REG (mode, cum->fregno);
13093 else
13094 return NULL_RTX;
13096 else
13098 int n_words = rs6000_arg_size (mode, type);
13099 int gregno = cum->sysv_gregno;
13101 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
13102 (r7,r8) or (r9,r10). So is any other 2-word item such
13103 as complex int, due to a historical mistake. */
13104 if (n_words == 2)
13105 gregno += (1 - gregno) & 1;
13107 /* Multi-reg args are not split between registers and stack. */
13108 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
13109 return NULL_RTX;
13111 if (TARGET_32BIT && TARGET_POWERPC64)
13112 return rs6000_mixed_function_arg (mode, type,
13113 gregno - GP_ARG_MIN_REG);
13114 return gen_rtx_REG (mode, gregno);
13117 else
13119 int align_words = rs6000_parm_start (mode, type, cum->words);
13121 /* _Decimal128 must be passed in an even/odd float register pair.
13122 This assumes that the register number is odd when fregno is odd. */
13123 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
13124 cum->fregno++;
13126 if (USE_FP_FOR_ARG_P (cum, elt_mode))
13128 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
13129 rtx r, off;
13130 int i, k = 0;
13131 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
13132 int fpr_words;
13134 /* Do we also need to pass this argument in the parameter
13135 save area? */
13136 if (type && (cum->nargs_prototype <= 0
13137 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
13138 && TARGET_XL_COMPAT
13139 && align_words >= GP_ARG_NUM_REG)))
13140 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
13142 /* Describe where this argument goes in the fprs. */
13143 for (i = 0; i < n_elts
13144 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
13146 /* Check if the argument is split over registers and memory.
13147 This can only ever happen for long double or _Decimal128;
13148 complex types are handled via split_complex_arg. */
13149 machine_mode fmode = elt_mode;
13150 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
13152 gcc_assert (FLOAT128_2REG_P (fmode));
13153 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
13156 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
13157 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
13158 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13161 /* If there were not enough FPRs to hold the argument, the rest
13162 usually goes into memory. However, if the current position
13163 is still within the register parameter area, a portion may
13164 actually have to go into GPRs.
13166 Note that it may happen that the portion of the argument
13167 passed in the first "half" of the first GPR was already
13168 passed in the last FPR as well.
13170 For unnamed arguments, we already set up GPRs to cover the
13171 whole argument in rs6000_psave_function_arg, so there is
13172 nothing further to do at this point. */
13173 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
13174 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
13175 && cum->nargs_prototype > 0)
13177 static bool warned;
13179 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
13180 int n_words = rs6000_arg_size (mode, type);
13182 align_words += fpr_words;
13183 n_words -= fpr_words;
13185 do
13186 {
13187 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
13188 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
13189 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
13190 }
13191 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
13193 if (!warned && warn_psabi)
13195 warned = true;
13196 inform (input_location,
13197 "the ABI of passing homogeneous float aggregates"
13198 " has changed in GCC 5");
13202 return rs6000_finish_function_arg (mode, rvec, k);
13204 else if (align_words < GP_ARG_NUM_REG)
13206 if (TARGET_32BIT && TARGET_POWERPC64)
13207 return rs6000_mixed_function_arg (mode, type, align_words);
13209 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
13211 else
13212 return NULL_RTX;
13216 /* For an arg passed partly in registers and partly in memory, this is
13217 the number of bytes passed in registers. For args passed entirely in
13218 registers or entirely in memory, zero. When an arg is described by a
13219 PARALLEL, perhaps using more than one register type, this function
13220 returns the number of bytes used by the first element of the PARALLEL. */
13222 static int
13223 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
13224 tree type, bool named)
13226 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
13227 bool passed_in_gprs = true;
13228 int ret = 0;
13229 int align_words;
13230 machine_mode elt_mode;
13231 int n_elts;
13233 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
13235 if (DEFAULT_ABI == ABI_V4)
13236 return 0;
13238 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
13240 /* If we are passing this arg in the fixed parameter save area (gprs or
13241 memory) as well as VRs, we do not use the partial bytes mechanism;
13242 instead, rs6000_function_arg will return a PARALLEL including a memory
13243 element as necessary. Library support functions for IEEE 128-bit are
13244 assumed to not need the value passed both in GPRs and in vector
13245 registers. */
13246 if (TARGET_64BIT && !cum->prototype
13247 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
13248 return 0;
13250 /* Otherwise, we pass in VRs only. Check for partial copies. */
13251 passed_in_gprs = false;
13252 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
13253 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
13256 /* In this complicated case we just disable the partial_nregs code. */
13257 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
13258 return 0;
13260 align_words = rs6000_parm_start (mode, type, cum->words);
13262 if (USE_FP_FOR_ARG_P (cum, elt_mode))
13264 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
13266 /* If we are passing this arg in the fixed parameter save area
13267 (gprs or memory) as well as FPRs, we do not use the partial
13268 bytes mechanism; instead, rs6000_function_arg will return a
13269 PARALLEL including a memory element as necessary. */
13270 if (type
13271 && (cum->nargs_prototype <= 0
13272 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
13273 && TARGET_XL_COMPAT
13274 && align_words >= GP_ARG_NUM_REG)))
13275 return 0;
13277 /* Otherwise, we pass in FPRs only. Check for partial copies. */
13278 passed_in_gprs = false;
13279 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
13281 /* Compute number of bytes / words passed in FPRs. If there
13282 is still space available in the register parameter area
13283 *after* that amount, a part of the argument will be passed
13284 in GPRs. In that case, the total amount passed in any
13285 registers is equal to the amount that would have been passed
13286 in GPRs if everything were passed there, so we fall back to
13287 the GPR code below to compute the appropriate value. */
13288 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
13289 * MIN (8, GET_MODE_SIZE (elt_mode)));
13290 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
13292 if (align_words + fpr_words < GP_ARG_NUM_REG)
13293 passed_in_gprs = true;
13294 else
13295 ret = fpr;
13299 if (passed_in_gprs
13300 && align_words < GP_ARG_NUM_REG
13301 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
13302 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
13304 if (ret != 0 && TARGET_DEBUG_ARG)
13305 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
13307 return ret;
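/* Editorial worked example: on 64-bit AIX, a named 16-byte integer
   aggregate that starts in the last GPR word (align_words == 7,
   GP_ARG_NUM_REG == 8) takes the final GPR branch above:

       ret = (8 - 7) * 8 = 8

   so 8 bytes travel in r10 and the remaining 8 bytes go to the
   parameter save area.  */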
13310 /* A C expression that indicates when an argument must be passed by
13311 reference. If nonzero for an argument, a copy of that argument is
13312 made in memory and a pointer to the argument is passed instead of
13313 the argument itself. The pointer is passed in whatever way is
13314 appropriate for passing a pointer to that type.
13316 Under V.4, aggregates and long double are passed by reference.
13318 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
13319 reference unless the AltiVec vector extension ABI is in force.
13321 As an extension to all ABIs, variable sized types are passed by
13322 reference. */
13324 static bool
13325 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
13326 machine_mode mode, const_tree type,
13327 bool named ATTRIBUTE_UNUSED)
13329 if (!type)
13330 return 0;
13332 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
13333 && FLOAT128_IEEE_P (TYPE_MODE (type)))
13335 if (TARGET_DEBUG_ARG)
13336 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
13337 return 1;
13340 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
13342 if (TARGET_DEBUG_ARG)
13343 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
13344 return 1;
13347 if (int_size_in_bytes (type) < 0)
13349 if (TARGET_DEBUG_ARG)
13350 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
13351 return 1;
13354 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
13355 modes only exist for GCC vector types if -maltivec. */
13356 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13358 if (TARGET_DEBUG_ARG)
13359 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
13360 return 1;
13363 /* Pass synthetic vectors in memory. */
13364 if (TREE_CODE (type) == VECTOR_TYPE
13365 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
13367 static bool warned_for_pass_big_vectors = false;
13368 if (TARGET_DEBUG_ARG)
13369 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
13370 if (!warned_for_pass_big_vectors)
13372 warning (OPT_Wpsabi, "GCC vector passed by reference: "
13373 "non-standard ABI extension with no compatibility guarantee");
13374 warned_for_pass_big_vectors = true;
13376 return 1;
13379 return 0;
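/* Editorial example: under the ABI_V4 aggregate rule above,

       struct S { int a[4]; };
       void f (struct S s);

   receives a pointer to a caller-made copy in r3, whereas the AIX and
   ELFv2 ABIs would pass the 16 bytes by value in GPRs and/or the
   parameter save area.  */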
13382 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
13383 already processed. Return true if the parameter must be passed
13384 (fully or partially) on the stack. */
13386 static bool
13387 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
13389 machine_mode mode;
13390 int unsignedp;
13391 rtx entry_parm;
13393 /* Catch errors. */
13394 if (type == NULL || type == error_mark_node)
13395 return true;
13397 /* Handle types with no storage requirement. */
13398 if (TYPE_MODE (type) == VOIDmode)
13399 return false;
13401 /* Handle complex types. */
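/* Editorial note: the two identical-looking calls below are
   intentional, not a copy-and-paste bug; rs6000_parm_needs_stack
   advances ARGS_SO_FAR via rs6000_function_arg_advance, so each call
   accounts for one component (real, then imaginary) of the complex
   value.  */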
13402 if (TREE_CODE (type) == COMPLEX_TYPE)
13403 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
13404 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
13406 /* Handle transparent aggregates. */
13407 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
13408 && TYPE_TRANSPARENT_AGGR (type))
13409 type = TREE_TYPE (first_field (type));
13411 /* See if this arg was passed by invisible reference. */
13412 if (pass_by_reference (get_cumulative_args (args_so_far),
13413 TYPE_MODE (type), type, true))
13414 type = build_pointer_type (type);
13416 /* Find mode as it is passed by the ABI. */
13417 unsignedp = TYPE_UNSIGNED (type);
13418 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
13420 /* If we must pass in stack, we need a stack. */
13421 if (rs6000_must_pass_in_stack (mode, type))
13422 return true;
13424 /* If there is no incoming register, we need a stack. */
13425 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
13426 if (entry_parm == NULL)
13427 return true;
13429 /* Likewise if we need to pass both in registers and on the stack. */
13430 if (GET_CODE (entry_parm) == PARALLEL
13431 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
13432 return true;
13434 /* Also true if we're partially in registers and partially not. */
13435 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
13436 return true;
13438 /* Update info on where next arg arrives in registers. */
13439 rs6000_function_arg_advance (args_so_far, mode, type, true);
13440 return false;
13443 /* Return true if FUN has no prototype, has a variable argument
13444 list, or passes any parameter in memory. */
13446 static bool
13447 rs6000_function_parms_need_stack (tree fun, bool incoming)
13449 tree fntype, result;
13450 CUMULATIVE_ARGS args_so_far_v;
13451 cumulative_args_t args_so_far;
13453 if (!fun)
13454 /* Must be a libcall, all of which only use reg parms. */
13455 return false;
13457 fntype = fun;
13458 if (!TYPE_P (fun))
13459 fntype = TREE_TYPE (fun);
13461 /* Varargs functions need the parameter save area. */
13462 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
13463 return true;
13465 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
13466 args_so_far = pack_cumulative_args (&args_so_far_v);
13468 /* When incoming, we will have been passed the function decl.
13469 It is necessary to use the decl to handle K&R style functions,
13470 where TYPE_ARG_TYPES may not be available. */
13471 if (incoming)
13473 gcc_assert (DECL_P (fun));
13474 result = DECL_RESULT (fun);
13476 else
13477 result = TREE_TYPE (fntype);
13479 if (result && aggregate_value_p (result, fntype))
13481 if (!TYPE_P (result))
13482 result = TREE_TYPE (result);
13483 result = build_pointer_type (result);
13484 rs6000_parm_needs_stack (args_so_far, result);
13487 if (incoming)
13489 tree parm;
13491 for (parm = DECL_ARGUMENTS (fun);
13492 parm && parm != void_list_node;
13493 parm = TREE_CHAIN (parm))
13494 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
13495 return true;
13497 else
13499 function_args_iterator args_iter;
13500 tree arg_type;
13502 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
13503 if (rs6000_parm_needs_stack (args_so_far, arg_type))
13504 return true;
13507 return false;
13510 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
13511 usually a constant depending on the ABI. However, in the ELFv2 ABI
13512 the register parameter area is optional when calling a function that
13513 has a prototype in scope, has no variable argument list, and passes
13514 all parameters in registers. */
13516 static int
13517 rs6000_reg_parm_stack_space (tree fun, bool incoming)
13519 int reg_parm_stack_space;
13521 switch (DEFAULT_ABI)
13523 default:
13524 reg_parm_stack_space = 0;
13525 break;
13527 case ABI_AIX:
13528 case ABI_DARWIN:
13529 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13530 break;
13532 case ABI_ELFv2:
13533 /* ??? Recomputing this every time is a bit expensive. Is there
13534 a place to cache this information? */
13535 if (rs6000_function_parms_need_stack (fun, incoming))
13536 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13537 else
13538 reg_parm_stack_space = 0;
13539 break;
13542 return reg_parm_stack_space;
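/* Editorial note: the 64 resp. 32 bytes above correspond to the eight
   GPR parameter registers (r3-r10) times the 8- resp. 4-byte register
   size of the 64- and 32-bit ABIs.  */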
13545 static void
13546 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
13548 int i;
13549 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
13551 if (nregs == 0)
13552 return;
13554 for (i = 0; i < nregs; i++)
13556 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
13557 if (reload_completed)
13559 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
13560 tem = NULL_RTX;
13561 else
13562 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
13563 i * GET_MODE_SIZE (reg_mode));
13565 else
13566 tem = replace_equiv_address (tem, XEXP (tem, 0));
13568 gcc_assert (tem);
13570 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
13574 /* Perform any actions needed for a function that is receiving a
13575 variable number of arguments.
13577 CUM is as above.
13579 MODE and TYPE are the mode and type of the current parameter.
13581 PRETEND_SIZE is a variable that should be set to the amount of stack
13582 that must be pushed by the prolog to pretend that our caller pushed
13583 it.
13585 Normally, this macro will push all remaining incoming registers on the
13586 stack and set PRETEND_SIZE to the length of the registers pushed. */
13588 static void
13589 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
13590 tree type, int *pretend_size ATTRIBUTE_UNUSED,
13591 int no_rtl)
13593 CUMULATIVE_ARGS next_cum;
13594 int reg_size = TARGET_32BIT ? 4 : 8;
13595 rtx save_area = NULL_RTX, mem;
13596 int first_reg_offset;
13597 alias_set_type set;
13599 /* Skip the last named argument. */
13600 next_cum = *get_cumulative_args (cum);
13601 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13603 if (DEFAULT_ABI == ABI_V4)
13605 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13607 if (! no_rtl)
13609 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13610 HOST_WIDE_INT offset = 0;
13612 /* Try to optimize the size of the varargs save area.
13613 The ABI requires that ap.reg_save_area is doubleword
13614 aligned, but we don't need to allocate space for all
13615 the bytes, only those to which we will actually save
13616 anything. */
13617 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13618 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13619 if (TARGET_HARD_FLOAT && TARGET_FPRS
13620 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13621 && cfun->va_list_fpr_size)
13623 if (gpr_reg_num)
13624 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13625 * UNITS_PER_FP_WORD;
13626 if (cfun->va_list_fpr_size
13627 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13628 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13629 else
13630 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13631 * UNITS_PER_FP_WORD;
13633 if (gpr_reg_num)
13635 offset = -((first_reg_offset * reg_size) & ~7);
13636 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13638 gpr_reg_num = cfun->va_list_gpr_size;
13639 if (reg_size == 4 && (first_reg_offset & 1))
13640 gpr_reg_num++;
13642 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13644 else if (fpr_size)
13645 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13646 * UNITS_PER_FP_WORD
13647 - (int) (GP_ARG_NUM_REG * reg_size);
13649 if (gpr_size + fpr_size)
13651 rtx reg_save_area
13652 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13653 gcc_assert (GET_CODE (reg_save_area) == MEM);
13654 reg_save_area = XEXP (reg_save_area, 0);
13655 if (GET_CODE (reg_save_area) == PLUS)
13657 gcc_assert (XEXP (reg_save_area, 0)
13658 == virtual_stack_vars_rtx);
13659 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13660 offset += INTVAL (XEXP (reg_save_area, 1));
13662 else
13663 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13666 cfun->machine->varargs_save_offset = offset;
13667 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13670 else
13672 first_reg_offset = next_cum.words;
13673 save_area = crtl->args.internal_arg_pointer;
13675 if (targetm.calls.must_pass_in_stack (mode, type))
13676 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13679 set = get_varargs_alias_set ();
13680 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13681 && cfun->va_list_gpr_size)
13683 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13685 if (va_list_gpr_counter_field)
13686 /* V4 va_list_gpr_size counts number of registers needed. */
13687 n_gpr = cfun->va_list_gpr_size;
13688 else
13689 /* char * va_list instead counts number of bytes needed. */
13690 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13692 if (nregs > n_gpr)
13693 nregs = n_gpr;
13695 mem = gen_rtx_MEM (BLKmode,
13696 plus_constant (Pmode, save_area,
13697 first_reg_offset * reg_size));
13698 MEM_NOTRAP_P (mem) = 1;
13699 set_mem_alias_set (mem, set);
13700 set_mem_align (mem, BITS_PER_WORD);
13702 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13703 nregs);
13706 /* Save FP registers if needed. */
13707 if (DEFAULT_ABI == ABI_V4
13708 && TARGET_HARD_FLOAT && TARGET_FPRS
13709 && ! no_rtl
13710 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13711 && cfun->va_list_fpr_size)
13713 int fregno = next_cum.fregno, nregs;
13714 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13715 rtx lab = gen_label_rtx ();
13716 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13717 * UNITS_PER_FP_WORD);
13719 emit_jump_insn
13720 (gen_rtx_SET (pc_rtx,
13721 gen_rtx_IF_THEN_ELSE (VOIDmode,
13722 gen_rtx_NE (VOIDmode, cr1,
13723 const0_rtx),
13724 gen_rtx_LABEL_REF (VOIDmode, lab),
13725 pc_rtx)));
13727 for (nregs = 0;
13728 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13729 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13731 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13732 ? DFmode : SFmode,
13733 plus_constant (Pmode, save_area, off));
13734 MEM_NOTRAP_P (mem) = 1;
13735 set_mem_alias_set (mem, set);
13736 set_mem_align (mem, GET_MODE_ALIGNMENT (
13737 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13738 ? DFmode : SFmode));
13739 emit_move_insn (mem, gen_rtx_REG (
13740 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13741 ? DFmode : SFmode, fregno));
13744 emit_label (lab);
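/* Editorial note: the register save area filled above is laid out as

       [ GP_ARG_NUM_REG * reg_size bytes of GPRs |
         FPRs, one UNITS_PER_FP_WORD slot each ]

   which matches the sav_ofs/sav_scale offsets used by
   rs6000_gimplify_va_arg below (there sav_ofs is 8 * 4 bytes for the
   hard-float FP case).  */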
13748 /* Create the va_list data type. */
13750 static tree
13751 rs6000_build_builtin_va_list (void)
13753 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13755 /* For AIX, prefer 'char *' because that's what the system
13756 header files like. */
13757 if (DEFAULT_ABI != ABI_V4)
13758 return build_pointer_type (char_type_node);
13760 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13761 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13762 get_identifier ("__va_list_tag"), record);
13764 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13765 unsigned_char_type_node);
13766 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13767 unsigned_char_type_node);
13768 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13769 every user file. */
13770 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13771 get_identifier ("reserved"), short_unsigned_type_node);
13772 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13773 get_identifier ("overflow_arg_area"),
13774 ptr_type_node);
13775 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13776 get_identifier ("reg_save_area"),
13777 ptr_type_node);
13779 va_list_gpr_counter_field = f_gpr;
13780 va_list_fpr_counter_field = f_fpr;
13782 DECL_FIELD_CONTEXT (f_gpr) = record;
13783 DECL_FIELD_CONTEXT (f_fpr) = record;
13784 DECL_FIELD_CONTEXT (f_res) = record;
13785 DECL_FIELD_CONTEXT (f_ovf) = record;
13786 DECL_FIELD_CONTEXT (f_sav) = record;
13788 TYPE_STUB_DECL (record) = type_decl;
13789 TYPE_NAME (record) = type_decl;
13790 TYPE_FIELDS (record) = f_gpr;
13791 DECL_CHAIN (f_gpr) = f_fpr;
13792 DECL_CHAIN (f_fpr) = f_res;
13793 DECL_CHAIN (f_res) = f_ovf;
13794 DECL_CHAIN (f_ovf) = f_sav;
13796 layout_type (record);
13798 /* The correct type is an array type of one element. */
13799 return build_array_type (record, build_index_type (size_zero_node));
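/* Editorial sketch: the record built above corresponds to the familiar
   SVR4 PowerPC va_list declaration, roughly:

       typedef struct __va_list_tag {
         unsigned char gpr;         // next GPR to use, 0..8
         unsigned char fpr;         // next FPR to use, 0..8
         unsigned short reserved;   // named padding for -Wpadded
         void *overflow_arg_area;   // arguments past the registers
         void *reg_save_area;       // block spilled by the prologue
       } va_list[1];  */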
13802 /* Implement va_start. */
13804 static void
13805 rs6000_va_start (tree valist, rtx nextarg)
13807 HOST_WIDE_INT words, n_gpr, n_fpr;
13808 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13809 tree gpr, fpr, ovf, sav, t;
13811 /* Only SVR4 needs something special. */
13812 if (DEFAULT_ABI != ABI_V4)
13814 std_expand_builtin_va_start (valist, nextarg);
13815 return;
13818 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13819 f_fpr = DECL_CHAIN (f_gpr);
13820 f_res = DECL_CHAIN (f_fpr);
13821 f_ovf = DECL_CHAIN (f_res);
13822 f_sav = DECL_CHAIN (f_ovf);
13824 valist = build_simple_mem_ref (valist);
13825 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13826 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13827 f_fpr, NULL_TREE);
13828 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13829 f_ovf, NULL_TREE);
13830 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13831 f_sav, NULL_TREE);
13833 /* Count number of gp and fp argument registers used. */
13834 words = crtl->args.info.words;
13835 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13836 GP_ARG_NUM_REG);
13837 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13838 FP_ARG_NUM_REG);
13840 if (TARGET_DEBUG_ARG)
13841 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13842 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13843 words, n_gpr, n_fpr);
13845 if (cfun->va_list_gpr_size)
13847 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13848 build_int_cst (NULL_TREE, n_gpr));
13849 TREE_SIDE_EFFECTS (t) = 1;
13850 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13853 if (cfun->va_list_fpr_size)
13855 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13856 build_int_cst (NULL_TREE, n_fpr));
13857 TREE_SIDE_EFFECTS (t) = 1;
13858 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13860 #ifdef HAVE_AS_GNU_ATTRIBUTE
13861 if (call_ABI_of_interest (cfun->decl))
13862 rs6000_passes_float = true;
13863 #endif
13866 /* Find the overflow area. */
13867 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13868 if (words != 0)
13869 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13870 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13871 TREE_SIDE_EFFECTS (t) = 1;
13872 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13874 /* If there were no va_arg invocations, don't set up the register
13875 save area. */
13876 if (!cfun->va_list_gpr_size
13877 && !cfun->va_list_fpr_size
13878 && n_gpr < GP_ARG_NUM_REG
13879 && n_fpr < FP_ARG_V4_MAX_REG)
13880 return;
13882 /* Find the register save area. */
13883 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13884 if (cfun->machine->varargs_save_offset)
13885 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13886 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13887 TREE_SIDE_EFFECTS (t) = 1;
13888 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
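/* Editorial example: for  void f (int a, double b, ...)  compiled for
   ABI_V4 with hard float, the code above stores gpr = 1 and fpr = 1
   (one GPR and one FPR consumed by the named arguments), points
   overflow_arg_area just past any named stack words, and aims
   reg_save_area at the block spilled by setup_incoming_varargs.  */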
13891 /* Implement va_arg. */
13893 static tree
13894 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13895 gimple_seq *post_p)
13897 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13898 tree gpr, fpr, ovf, sav, reg, t, u;
13899 int size, rsize, n_reg, sav_ofs, sav_scale;
13900 tree lab_false, lab_over, addr;
13901 int align;
13902 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13903 int regalign = 0;
13904 gimple *stmt;
13906 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13908 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13909 return build_va_arg_indirect_ref (t);
13912 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13913 earlier version of gcc, with the property that it always applied alignment
13914 adjustments to the va-args (even for zero-sized types). The cheapest way
13915 to deal with this is to replicate the effect of the part of
13916 std_gimplify_va_arg_expr that carries out the align adjust, for the
13917 relevant case.
13918 We don't need to check for pass-by-reference because of the test above.
13919 We can return a simplified answer, since we know there's no offset to add. */
13921 if (((TARGET_MACHO
13922 && rs6000_darwin64_abi)
13923 || DEFAULT_ABI == ABI_ELFv2
13924 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13925 && integer_zerop (TYPE_SIZE (type)))
13927 unsigned HOST_WIDE_INT align, boundary;
13928 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13929 align = PARM_BOUNDARY / BITS_PER_UNIT;
13930 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13931 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13932 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13933 boundary /= BITS_PER_UNIT;
13934 if (boundary > align)
13936 tree t;
13937 /* This updates arg ptr by the amount that would be necessary
13938 to align the zero-sized (but not zero-alignment) item. */
13939 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13940 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13941 gimplify_and_add (t, pre_p);
13943 t = fold_convert (sizetype, valist_tmp);
13944 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13945 fold_convert (TREE_TYPE (valist),
13946 fold_build2 (BIT_AND_EXPR, sizetype, t,
13947 size_int (-boundary))));
13948 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13949 gimplify_and_add (t, pre_p);
13951 /* Since it is zero-sized there's no increment for the item itself. */
13952 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13953 return build_va_arg_indirect_ref (valist_tmp);
13956 if (DEFAULT_ABI != ABI_V4)
13958 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13960 tree elem_type = TREE_TYPE (type);
13961 machine_mode elem_mode = TYPE_MODE (elem_type);
13962 int elem_size = GET_MODE_SIZE (elem_mode);
13964 if (elem_size < UNITS_PER_WORD)
13966 tree real_part, imag_part;
13967 gimple_seq post = NULL;
13969 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13970 &post);
13971 /* Copy the value into a temporary, lest the formal temporary
13972 be reused out from under us. */
13973 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13974 gimple_seq_add_seq (pre_p, post);
13976 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13977 post_p);
13979 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13983 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13986 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13987 f_fpr = DECL_CHAIN (f_gpr);
13988 f_res = DECL_CHAIN (f_fpr);
13989 f_ovf = DECL_CHAIN (f_res);
13990 f_sav = DECL_CHAIN (f_ovf);
13992 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13993 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13994 f_fpr, NULL_TREE);
13995 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13996 f_ovf, NULL_TREE);
13997 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13998 f_sav, NULL_TREE);
14000 size = int_size_in_bytes (type);
14001 rsize = (size + 3) / 4;
14002 int pad = 4 * rsize - size;
14003 align = 1;
14005 machine_mode mode = TYPE_MODE (type);
14006 if (abi_v4_pass_in_fpr (mode))
14008 /* FP args go in FP registers, if present. */
14009 reg = fpr;
14010 n_reg = (size + 7) / 8;
14011 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
14012 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
14013 if (mode != SFmode && mode != SDmode)
14014 align = 8;
14016 else
14018 /* Otherwise into GP registers. */
14019 reg = gpr;
14020 n_reg = rsize;
14021 sav_ofs = 0;
14022 sav_scale = 4;
14023 if (n_reg == 2)
14024 align = 8;
14027 /* Pull the value out of the saved registers.... */
14029 lab_over = NULL;
14030 addr = create_tmp_var (ptr_type_node, "addr");
14032 /* AltiVec vectors never go in registers when -mabi=altivec. */
14033 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
14034 align = 16;
14035 else
14037 lab_false = create_artificial_label (input_location);
14038 lab_over = create_artificial_label (input_location);
14040 /* Long long and SPE vectors are aligned in the registers.
14041 So is any other 2-GPR item such as complex int, due to a
14042 historical mistake. */
14043 u = reg;
14044 if (n_reg == 2 && reg == gpr)
14046 regalign = 1;
14047 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14048 build_int_cst (TREE_TYPE (reg), n_reg - 1));
14049 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
14050 unshare_expr (reg), u);
14052 /* _Decimal128 is passed in even/odd fpr pairs; the stored
14053 reg number is 0 for f1, so we want to make it odd. */
14054 else if (reg == fpr && mode == TDmode)
14056 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14057 build_int_cst (TREE_TYPE (reg), 1));
14058 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
14061 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
14062 t = build2 (GE_EXPR, boolean_type_node, u, t);
14063 u = build1 (GOTO_EXPR, void_type_node, lab_false);
14064 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
14065 gimplify_and_add (t, pre_p);
14067 t = sav;
14068 if (sav_ofs)
14069 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
14071 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
14072 build_int_cst (TREE_TYPE (reg), n_reg));
14073 u = fold_convert (sizetype, u);
14074 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
14075 t = fold_build_pointer_plus (t, u);
14077 /* _Decimal32 varargs are located in the second word of the 64-bit
14078 FP register for 32-bit binaries. */
14079 if (TARGET_32BIT
14080 && TARGET_HARD_FLOAT && TARGET_FPRS
14081 && mode == SDmode)
14082 t = fold_build_pointer_plus_hwi (t, size);
14084 /* Args are passed right-aligned. */
14085 if (BYTES_BIG_ENDIAN)
14086 t = fold_build_pointer_plus_hwi (t, pad);
14088 gimplify_assign (addr, t, pre_p);
14090 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
14092 stmt = gimple_build_label (lab_false);
14093 gimple_seq_add_stmt (pre_p, stmt);
14095 if ((n_reg == 2 && !regalign) || n_reg > 2)
14097 /* Ensure that we don't find any more args in regs.
14098 Alignment has been taken care of for the special cases. */
14099 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
14103 /* ... otherwise out of the overflow area. */
14105 /* Care for on-stack alignment if needed. */
14106 t = ovf;
14107 if (align != 1)
14109 t = fold_build_pointer_plus_hwi (t, align - 1);
14110 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
14111 build_int_cst (TREE_TYPE (t), -align));
14114 /* Args are passed right-aligned. */
14115 if (BYTES_BIG_ENDIAN)
14116 t = fold_build_pointer_plus_hwi (t, pad);
14118 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
14120 gimplify_assign (unshare_expr (addr), t, pre_p);
14122 t = fold_build_pointer_plus_hwi (t, size);
14123 gimplify_assign (unshare_expr (ovf), t, pre_p);
14125 if (lab_over)
14127 stmt = gimple_build_label (lab_over);
14128 gimple_seq_add_stmt (pre_p, stmt);
14131 if (STRICT_ALIGNMENT
14132 && (TYPE_ALIGN (type)
14133 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
14135 /* The value (of type complex double, for example) may not be
14136 aligned in memory in the saved registers, so copy via a
14137 temporary. (This is the same code as used for SPARC.) */
14138 tree tmp = create_tmp_var (type, "va_arg_tmp");
14139 tree dest_addr = build_fold_addr_expr (tmp);
14141 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
14142 3, dest_addr, addr, size_int (rsize * 4));
14144 gimplify_and_add (copy, pre_p);
14145 addr = dest_addr;
14148 addr = fold_convert (ptrtype, addr);
14149 return build_va_arg_indirect_ref (addr);
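/* Editorial sketch (approximate, assumption-flagged) of the gimple
   emitted above for a two-register GPR argument such as long long
   under ABI_V4:

       if ((reg += reg & 1) > 8 - n_reg) goto lab_false;  // even pair
       addr = sav + sav_ofs + reg * sav_scale;  reg += n_reg;
       goto lab_over;
     lab_false:
       reg = 8;                           // no more args in registers
       addr = (ovf + align - 1) & -align;  ovf = addr + size;
     lab_over:
       result = *(TYPE *) addr;  */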
14152 /* Builtins. */
14154 static void
14155 def_builtin (const char *name, tree type, enum rs6000_builtins code)
14157 tree t;
14158 unsigned classify = rs6000_builtin_info[(int)code].attr;
14159 const char *attr_string = "";
14161 gcc_assert (name != NULL);
14162 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
14164 if (rs6000_builtin_decls[(int)code])
14165 fatal_error (input_location,
14166 "internal error: builtin function %s already processed", name);
14168 rs6000_builtin_decls[(int)code] = t =
14169 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
14171 /* Set any special attributes. */
14172 if ((classify & RS6000_BTC_CONST) != 0)
14174 /* const function, function only depends on the inputs. */
14175 TREE_READONLY (t) = 1;
14176 TREE_NOTHROW (t) = 1;
14177 attr_string = ", const";
14179 else if ((classify & RS6000_BTC_PURE) != 0)
14181 /* pure function, function can read global memory, but does not set any
14182 external state. */
14183 DECL_PURE_P (t) = 1;
14184 TREE_NOTHROW (t) = 1;
14185 attr_string = ", pure";
14187 else if ((classify & RS6000_BTC_FP) != 0)
14189 /* Function is a math function. If rounding mode is on, then treat the
14190 function as not reading global memory, but it can have arbitrary side
14191 effects. If it is off, then assume the function is a const function.
14192 This mimics the ATTR_MATHFN_FPROUNDING attribute in
14193 builtin-attribute.def that is used for the math functions. */
14194 TREE_NOTHROW (t) = 1;
14195 if (flag_rounding_math)
14197 DECL_PURE_P (t) = 1;
14198 DECL_IS_NOVOPS (t) = 1;
14199 attr_string = ", fp, pure";
14201 else
14203 TREE_READONLY (t) = 1;
14204 attr_string = ", fp, const";
14207 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
14208 gcc_unreachable ();
14210 if (TARGET_DEBUG_BUILTIN)
14211 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
14212 (int)code, name, attr_string);
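/* Editorial usage sketch (names illustrative, taken from the rs6000
   builtin tables rather than guaranteed by this file): a vector
   builtin is registered along the lines of

       def_builtin ("__builtin_altivec_vaddfp", v4sf_ftype_v4sf_v4sf,
                    ALTIVEC_BUILTIN_VADDFP);

   where the RS6000_BTC_* bits in its powerpcspe-builtin.def entry
   decide which of the const/pure/fp attribute branches above fire.  */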
14215 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
14217 #undef RS6000_BUILTIN_0
14218 #undef RS6000_BUILTIN_1
14219 #undef RS6000_BUILTIN_2
14220 #undef RS6000_BUILTIN_3
14221 #undef RS6000_BUILTIN_A
14222 #undef RS6000_BUILTIN_D
14223 #undef RS6000_BUILTIN_E
14224 #undef RS6000_BUILTIN_H
14225 #undef RS6000_BUILTIN_P
14226 #undef RS6000_BUILTIN_Q
14227 #undef RS6000_BUILTIN_S
14228 #undef RS6000_BUILTIN_X
14230 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14231 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14232 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14233 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
14234 { MASK, ICODE, NAME, ENUM },
14236 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14237 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14238 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14239 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14240 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14241 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14242 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14243 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14245 static const struct builtin_description bdesc_3arg[] =
14247 #include "powerpcspe-builtin.def"
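/* Editorial note: the table above uses the "X macro" technique.
   powerpcspe-builtin.def names every builtin exactly once through
   RS6000_BUILTIN_<kind>(ENUM, NAME, MASK, ATTR, ICODE); before each
   table all kinds are #undef'd and only the kind being collected is
   redefined to expand to an initializer { MASK, ICODE, NAME, ENUM },
   while the rest expand to nothing, so each #include of the .def file
   extracts exactly one category of builtins.  */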
14250 /* DST operations: void foo (void *, const int, const char). */
14252 #undef RS6000_BUILTIN_0
14253 #undef RS6000_BUILTIN_1
14254 #undef RS6000_BUILTIN_2
14255 #undef RS6000_BUILTIN_3
14256 #undef RS6000_BUILTIN_A
14257 #undef RS6000_BUILTIN_D
14258 #undef RS6000_BUILTIN_E
14259 #undef RS6000_BUILTIN_H
14260 #undef RS6000_BUILTIN_P
14261 #undef RS6000_BUILTIN_Q
14262 #undef RS6000_BUILTIN_S
14263 #undef RS6000_BUILTIN_X
14265 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14266 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14267 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14268 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14269 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14270 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
14271 { MASK, ICODE, NAME, ENUM },
14273 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14274 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14275 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14276 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14277 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14278 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14280 static const struct builtin_description bdesc_dst[] =
14282 #include "powerpcspe-builtin.def"
14285 /* Simple binary operations: VECc = foo (VECa, VECb). */
14287 #undef RS6000_BUILTIN_0
14288 #undef RS6000_BUILTIN_1
14289 #undef RS6000_BUILTIN_2
14290 #undef RS6000_BUILTIN_3
14291 #undef RS6000_BUILTIN_A
14292 #undef RS6000_BUILTIN_D
14293 #undef RS6000_BUILTIN_E
14294 #undef RS6000_BUILTIN_H
14295 #undef RS6000_BUILTIN_P
14296 #undef RS6000_BUILTIN_Q
14297 #undef RS6000_BUILTIN_S
14298 #undef RS6000_BUILTIN_X
14300 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14301 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14302 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
14303 { MASK, ICODE, NAME, ENUM },
14305 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14306 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14307 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14308 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14309 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14310 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14311 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14312 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14313 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14315 static const struct builtin_description bdesc_2arg[] =
14317 #include "powerpcspe-builtin.def"
14320 #undef RS6000_BUILTIN_0
14321 #undef RS6000_BUILTIN_1
14322 #undef RS6000_BUILTIN_2
14323 #undef RS6000_BUILTIN_3
14324 #undef RS6000_BUILTIN_A
14325 #undef RS6000_BUILTIN_D
14326 #undef RS6000_BUILTIN_E
14327 #undef RS6000_BUILTIN_H
14328 #undef RS6000_BUILTIN_P
14329 #undef RS6000_BUILTIN_Q
14330 #undef RS6000_BUILTIN_S
14331 #undef RS6000_BUILTIN_X
14333 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14334 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14335 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14336 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14337 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14338 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14339 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14340 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14341 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
14342 { MASK, ICODE, NAME, ENUM },
14344 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14345 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14346 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14348 /* AltiVec predicates. */
14350 static const struct builtin_description bdesc_altivec_preds[] =
14352 #include "powerpcspe-builtin.def"
14355 /* SPE predicates. */
14356 #undef RS6000_BUILTIN_0
14357 #undef RS6000_BUILTIN_1
14358 #undef RS6000_BUILTIN_2
14359 #undef RS6000_BUILTIN_3
14360 #undef RS6000_BUILTIN_A
14361 #undef RS6000_BUILTIN_D
14362 #undef RS6000_BUILTIN_E
14363 #undef RS6000_BUILTIN_H
14364 #undef RS6000_BUILTIN_P
14365 #undef RS6000_BUILTIN_Q
14366 #undef RS6000_BUILTIN_S
14367 #undef RS6000_BUILTIN_X
14369 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14370 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14371 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14372 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14373 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14374 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14375 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14376 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14377 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14378 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14379 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
14380 { MASK, ICODE, NAME, ENUM },
14382 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14384 static const struct builtin_description bdesc_spe_predicates[] =
14386 #include "powerpcspe-builtin.def"
14389 /* SPE evsel predicates. */
14390 #undef RS6000_BUILTIN_0
14391 #undef RS6000_BUILTIN_1
14392 #undef RS6000_BUILTIN_2
14393 #undef RS6000_BUILTIN_3
14394 #undef RS6000_BUILTIN_A
14395 #undef RS6000_BUILTIN_D
14396 #undef RS6000_BUILTIN_E
14397 #undef RS6000_BUILTIN_H
14398 #undef RS6000_BUILTIN_P
14399 #undef RS6000_BUILTIN_Q
14400 #undef RS6000_BUILTIN_S
14401 #undef RS6000_BUILTIN_X
14403 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14404 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14405 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14406 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14407 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14408 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14409 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
14410 { MASK, ICODE, NAME, ENUM },
14412 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14413 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14414 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14415 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14416 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14418 static const struct builtin_description bdesc_spe_evsel[] =
14420 #include "powerpcspe-builtin.def"
14423 /* PAIRED predicates. */
14424 #undef RS6000_BUILTIN_0
14425 #undef RS6000_BUILTIN_1
14426 #undef RS6000_BUILTIN_2
14427 #undef RS6000_BUILTIN_3
14428 #undef RS6000_BUILTIN_A
14429 #undef RS6000_BUILTIN_D
14430 #undef RS6000_BUILTIN_E
14431 #undef RS6000_BUILTIN_H
14432 #undef RS6000_BUILTIN_P
14433 #undef RS6000_BUILTIN_Q
14434 #undef RS6000_BUILTIN_S
14435 #undef RS6000_BUILTIN_X
14437 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14438 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14439 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14440 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14441 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14442 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14443 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14444 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14445 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14446 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
14447 { MASK, ICODE, NAME, ENUM },
14449 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14450 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14452 static const struct builtin_description bdesc_paired_preds[] =
14454 #include "powerpcspe-builtin.def"
14457 /* ABS* operations. */
14459 #undef RS6000_BUILTIN_0
14460 #undef RS6000_BUILTIN_1
14461 #undef RS6000_BUILTIN_2
14462 #undef RS6000_BUILTIN_3
14463 #undef RS6000_BUILTIN_A
14464 #undef RS6000_BUILTIN_D
14465 #undef RS6000_BUILTIN_E
14466 #undef RS6000_BUILTIN_H
14467 #undef RS6000_BUILTIN_P
14468 #undef RS6000_BUILTIN_Q
14469 #undef RS6000_BUILTIN_S
14470 #undef RS6000_BUILTIN_X
14472 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14473 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14474 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14475 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14476 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
14477 { MASK, ICODE, NAME, ENUM },
14479 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14480 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14481 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14482 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14483 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14484 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14485 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14487 static const struct builtin_description bdesc_abs[] =
14489 #include "powerpcspe-builtin.def"
14492 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
14493 foo (VECa). */
14495 #undef RS6000_BUILTIN_0
14496 #undef RS6000_BUILTIN_1
14497 #undef RS6000_BUILTIN_2
14498 #undef RS6000_BUILTIN_3
14499 #undef RS6000_BUILTIN_A
14500 #undef RS6000_BUILTIN_D
14501 #undef RS6000_BUILTIN_E
14502 #undef RS6000_BUILTIN_H
14503 #undef RS6000_BUILTIN_P
14504 #undef RS6000_BUILTIN_Q
14505 #undef RS6000_BUILTIN_S
14506 #undef RS6000_BUILTIN_X
14508 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14509 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
14510 { MASK, ICODE, NAME, ENUM },
14512 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14513 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14514 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14515 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14516 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14517 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14518 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14519 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14520 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14521 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14523 static const struct builtin_description bdesc_1arg[] =
14525 #include "powerpcspe-builtin.def"
14528 /* Simple no-argument operations: result = __builtin_darn_32 (). */
14530 #undef RS6000_BUILTIN_0
14531 #undef RS6000_BUILTIN_1
14532 #undef RS6000_BUILTIN_2
14533 #undef RS6000_BUILTIN_3
14534 #undef RS6000_BUILTIN_A
14535 #undef RS6000_BUILTIN_D
14536 #undef RS6000_BUILTIN_E
14537 #undef RS6000_BUILTIN_H
14538 #undef RS6000_BUILTIN_P
14539 #undef RS6000_BUILTIN_Q
14540 #undef RS6000_BUILTIN_S
14541 #undef RS6000_BUILTIN_X
14543 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
14544 { MASK, ICODE, NAME, ENUM },
14546 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14547 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14548 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14549 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14550 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14551 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14552 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14553 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14554 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14555 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14556 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14558 static const struct builtin_description bdesc_0arg[] =
14560 #include "powerpcspe-builtin.def"
14563 /* HTM builtins. */
14564 #undef RS6000_BUILTIN_0
14565 #undef RS6000_BUILTIN_1
14566 #undef RS6000_BUILTIN_2
14567 #undef RS6000_BUILTIN_3
14568 #undef RS6000_BUILTIN_A
14569 #undef RS6000_BUILTIN_D
14570 #undef RS6000_BUILTIN_E
14571 #undef RS6000_BUILTIN_H
14572 #undef RS6000_BUILTIN_P
14573 #undef RS6000_BUILTIN_Q
14574 #undef RS6000_BUILTIN_S
14575 #undef RS6000_BUILTIN_X
14577 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14578 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14579 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14580 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14581 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14582 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14583 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14584 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
14585 { MASK, ICODE, NAME, ENUM },
14587 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14588 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14589 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14590 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14592 static const struct builtin_description bdesc_htm[] =
14594 #include "powerpcspe-builtin.def"
14597 #undef RS6000_BUILTIN_0
14598 #undef RS6000_BUILTIN_1
14599 #undef RS6000_BUILTIN_2
14600 #undef RS6000_BUILTIN_3
14601 #undef RS6000_BUILTIN_A
14602 #undef RS6000_BUILTIN_D
14603 #undef RS6000_BUILTIN_E
14604 #undef RS6000_BUILTIN_H
14605 #undef RS6000_BUILTIN_P
14606 #undef RS6000_BUILTIN_Q
14607 #undef RS6000_BUILTIN_S
14608 #undef RS6000_BUILTIN_X
14609 /* Return true if a builtin function is overloaded. */
14610 bool
14611 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
14613 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
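14615 /* Return the name of the overloaded builtin function FNCODE. */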
14616 const char *
14617 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
14619 return rs6000_builtin_info[(int)fncode].name;
14622 /* Expand an expression EXP that calls a builtin without arguments. */
14623 static rtx
14624 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
14626 rtx pat;
14627 machine_mode tmode = insn_data[icode].operand[0].mode;
14629 if (icode == CODE_FOR_nothing)
14630 /* Builtin not supported on this processor. */
14631 return 0;
14633 if (target == 0
14634 || GET_MODE (target) != tmode
14635 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14636 target = gen_reg_rtx (tmode);
14638 pat = GEN_FCN (icode) (target);
14639 if (! pat)
14640 return 0;
14641 emit_insn (pat);
14643 return target;
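14645 /* Expand the mtfsf builtin: arg 0 is an 8-bit FPSCR field mask and
14646 arg 1 is the value to move into the FPSCR. */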
14647 static rtx
14648 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
14650 rtx pat;
14651 tree arg0 = CALL_EXPR_ARG (exp, 0);
14652 tree arg1 = CALL_EXPR_ARG (exp, 1);
14653 rtx op0 = expand_normal (arg0);
14654 rtx op1 = expand_normal (arg1);
14655 machine_mode mode0 = insn_data[icode].operand[0].mode;
14656 machine_mode mode1 = insn_data[icode].operand[1].mode;
14658 if (icode == CODE_FOR_nothing)
14659 /* Builtin not supported on this processor. */
14660 return 0;
14662 /* If we got invalid arguments, bail out before generating bad rtl. */
14663 if (arg0 == error_mark_node || arg1 == error_mark_node)
14664 return const0_rtx;
14666 if (GET_CODE (op0) != CONST_INT
14667 || INTVAL (op0) > 255
14668 || INTVAL (op0) < 0)
14670 error ("argument 1 must be an 8-bit field value");
14671 return const0_rtx;
14674 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14675 op0 = copy_to_mode_reg (mode0, op0);
14677 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14678 op1 = copy_to_mode_reg (mode1, op1);
14680 pat = GEN_FCN (icode) (op0, op1);
14681 if (! pat)
14682 return const0_rtx;
14683 emit_insn (pat);
14685 return NULL_RTX;
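14687 /* Expand an expression EXP that calls a builtin with one argument. */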
14688 static rtx
14689 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
14691 rtx pat;
14692 tree arg0 = CALL_EXPR_ARG (exp, 0);
14693 rtx op0 = expand_normal (arg0);
14694 machine_mode tmode = insn_data[icode].operand[0].mode;
14695 machine_mode mode0 = insn_data[icode].operand[1].mode;
14697 if (icode == CODE_FOR_nothing)
14698 /* Builtin not supported on this processor. */
14699 return 0;
14701 /* If we got invalid arguments, bail out before generating bad rtl. */
14702 if (arg0 == error_mark_node)
14703 return const0_rtx;
14705 if (icode == CODE_FOR_altivec_vspltisb
14706 || icode == CODE_FOR_altivec_vspltish
14707 || icode == CODE_FOR_altivec_vspltisw
14708 || icode == CODE_FOR_spe_evsplatfi
14709 || icode == CODE_FOR_spe_evsplati)
14711 /* Only allow 5-bit *signed* literals. */
14712 if (GET_CODE (op0) != CONST_INT
14713 || INTVAL (op0) > 15
14714 || INTVAL (op0) < -16)
14716 error ("argument 1 must be a 5-bit signed literal");
14717 return CONST0_RTX (tmode);
14721 if (target == 0
14722 || GET_MODE (target) != tmode
14723 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14724 target = gen_reg_rtx (tmode);
14726 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14727 op0 = copy_to_mode_reg (mode0, op0);
14729 pat = GEN_FCN (icode) (target, op0);
14730 if (! pat)
14731 return 0;
14732 emit_insn (pat);
14734 return target;
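14736 /* Expand an AltiVec ABS* builtin; these patterns use two scratch registers. */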
14737 static rtx
14738 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14740 rtx pat, scratch1, scratch2;
14741 tree arg0 = CALL_EXPR_ARG (exp, 0);
14742 rtx op0 = expand_normal (arg0);
14743 machine_mode tmode = insn_data[icode].operand[0].mode;
14744 machine_mode mode0 = insn_data[icode].operand[1].mode;
14746 /* If we have invalid arguments, bail out before generating bad rtl. */
14747 if (arg0 == error_mark_node)
14748 return const0_rtx;
14750 if (target == 0
14751 || GET_MODE (target) != tmode
14752 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14753 target = gen_reg_rtx (tmode);
14755 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14756 op0 = copy_to_mode_reg (mode0, op0);
14758 scratch1 = gen_reg_rtx (mode0);
14759 scratch2 = gen_reg_rtx (mode0);
14761 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14762 if (! pat)
14763 return 0;
14764 emit_insn (pat);
14766 return target;
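14768 /* Expand an expression EXP that calls a builtin with two arguments. */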
14769 static rtx
14770 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14772 rtx pat;
14773 tree arg0 = CALL_EXPR_ARG (exp, 0);
14774 tree arg1 = CALL_EXPR_ARG (exp, 1);
14775 rtx op0 = expand_normal (arg0);
14776 rtx op1 = expand_normal (arg1);
14777 machine_mode tmode = insn_data[icode].operand[0].mode;
14778 machine_mode mode0 = insn_data[icode].operand[1].mode;
14779 machine_mode mode1 = insn_data[icode].operand[2].mode;
14781 if (icode == CODE_FOR_nothing)
14782 /* Builtin not supported on this processor. */
14783 return 0;
14785 /* If we got invalid arguments, bail out before generating bad rtl. */
14786 if (arg0 == error_mark_node || arg1 == error_mark_node)
14787 return const0_rtx;
14789 if (icode == CODE_FOR_altivec_vcfux
14790 || icode == CODE_FOR_altivec_vcfsx
14791 || icode == CODE_FOR_altivec_vctsxs
14792 || icode == CODE_FOR_altivec_vctuxs
14793 || icode == CODE_FOR_altivec_vspltb
14794 || icode == CODE_FOR_altivec_vsplth
14795 || icode == CODE_FOR_altivec_vspltw
14796 || icode == CODE_FOR_spe_evaddiw
14797 || icode == CODE_FOR_spe_evldd
14798 || icode == CODE_FOR_spe_evldh
14799 || icode == CODE_FOR_spe_evldw
14800 || icode == CODE_FOR_spe_evlhhesplat
14801 || icode == CODE_FOR_spe_evlhhossplat
14802 || icode == CODE_FOR_spe_evlhhousplat
14803 || icode == CODE_FOR_spe_evlwhe
14804 || icode == CODE_FOR_spe_evlwhos
14805 || icode == CODE_FOR_spe_evlwhou
14806 || icode == CODE_FOR_spe_evlwhsplat
14807 || icode == CODE_FOR_spe_evlwwsplat
14808 || icode == CODE_FOR_spe_evrlwi
14809 || icode == CODE_FOR_spe_evslwi
14810 || icode == CODE_FOR_spe_evsrwis
14811 || icode == CODE_FOR_spe_evsubifw
14812 || icode == CODE_FOR_spe_evsrwiu)
14814 /* Only allow 5-bit unsigned literals. */
14815 STRIP_NOPS (arg1);
14816 if (TREE_CODE (arg1) != INTEGER_CST
14817 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14819 error ("argument 2 must be a 5-bit unsigned literal");
14820 return CONST0_RTX (tmode);
14823 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14824 || icode == CODE_FOR_dfptstsfi_lt_dd
14825 || icode == CODE_FOR_dfptstsfi_gt_dd
14826 || icode == CODE_FOR_dfptstsfi_unordered_dd
14827 || icode == CODE_FOR_dfptstsfi_eq_td
14828 || icode == CODE_FOR_dfptstsfi_lt_td
14829 || icode == CODE_FOR_dfptstsfi_gt_td
14830 || icode == CODE_FOR_dfptstsfi_unordered_td)
14832 /* Only allow 6-bit unsigned literals. */
14833 STRIP_NOPS (arg0);
14834 if (TREE_CODE (arg0) != INTEGER_CST
14835 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14837 error ("argument 1 must be a 6-bit unsigned literal");
14838 return CONST0_RTX (tmode);
14841 else if (icode == CODE_FOR_xststdcdp
14842 || icode == CODE_FOR_xststdcsp
14843 || icode == CODE_FOR_xvtstdcdp
14844 || icode == CODE_FOR_xvtstdcsp)
14846 /* Only allow 7-bit unsigned literals. */
14847 STRIP_NOPS (arg1);
14848 if (TREE_CODE (arg1) != INTEGER_CST
14849 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14851 error ("argument 2 must be a 7-bit unsigned literal");
14852 return CONST0_RTX (tmode);
14856 if (target == 0
14857 || GET_MODE (target) != tmode
14858 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14859 target = gen_reg_rtx (tmode);
14861 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14862 op0 = copy_to_mode_reg (mode0, op0);
14863 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14864 op1 = copy_to_mode_reg (mode1, op1);
14866 pat = GEN_FCN (icode) (target, op0, op1);
14867 if (! pat)
14868 return 0;
14869 emit_insn (pat);
14871 return target;
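14872 /* Expand an AltiVec predicate builtin; argument 0 selects which CR6
14873 test is applied to the comparison result. */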
14874 static rtx
14875 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14877 rtx pat, scratch;
14878 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14879 tree arg0 = CALL_EXPR_ARG (exp, 1);
14880 tree arg1 = CALL_EXPR_ARG (exp, 2);
14881 rtx op0 = expand_normal (arg0);
14882 rtx op1 = expand_normal (arg1);
14883 machine_mode tmode = SImode;
14884 machine_mode mode0 = insn_data[icode].operand[1].mode;
14885 machine_mode mode1 = insn_data[icode].operand[2].mode;
14886 int cr6_form_int;
14888 if (TREE_CODE (cr6_form) != INTEGER_CST)
14890 error ("argument 1 of __builtin_altivec_predicate must be a constant");
14891 return const0_rtx;
14893 else
14894 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14896 gcc_assert (mode0 == mode1);
14898 /* If we have invalid arguments, bail out before generating bad rtl. */
14899 if (arg0 == error_mark_node || arg1 == error_mark_node)
14900 return const0_rtx;
14902 if (target == 0
14903 || GET_MODE (target) != tmode
14904 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14905 target = gen_reg_rtx (tmode);
14907 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14908 op0 = copy_to_mode_reg (mode0, op0);
14909 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14910 op1 = copy_to_mode_reg (mode1, op1);
14912 /* Note that for many of the relevant operations (e.g. cmpne or
14913 cmpeq) with float or double operands, it makes more sense for the
14914 mode of the allocated scratch register to be a vector of
14915 integers. But the choice to copy the mode of operand 0 was made
14916 long ago and there are no plans to change it. */
14917 scratch = gen_reg_rtx (mode0);
14919 pat = GEN_FCN (icode) (scratch, op0, op1);
14920 if (! pat)
14921 return 0;
14922 emit_insn (pat);
14924 /* The vec_any* and vec_all* predicates use the same opcodes for two
14925 different operations, but the bits in CR6 will be different
14926 depending on what information we want. So we have to play tricks
14927 with CR6 to get the right bits out.
14929 If you think this is disgusting, look at the specs for the
14930 AltiVec predicates. */
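14931 /* E.g. vec_all_eq uses the "lt" test; vec_any_eq the reversed "eq" test. */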
14932 switch (cr6_form_int)
14934 case 0:
14935 emit_insn (gen_cr6_test_for_zero (target));
14936 break;
14937 case 1:
14938 emit_insn (gen_cr6_test_for_zero_reverse (target));
14939 break;
14940 case 2:
14941 emit_insn (gen_cr6_test_for_lt (target));
14942 break;
14943 case 3:
14944 emit_insn (gen_cr6_test_for_lt_reverse (target));
14945 break;
14946 default:
14947 error ("argument 1 of __builtin_altivec_predicate is out of range");
14948 break;
14951 return target;
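14952 /* Expand a PAIRED load builtin; the effective address is arg 1, or
14953 arg 0 + arg 1 when arg 0 is nonzero. */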
14954 static rtx
14955 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14957 rtx pat, addr;
14958 tree arg0 = CALL_EXPR_ARG (exp, 0);
14959 tree arg1 = CALL_EXPR_ARG (exp, 1);
14960 machine_mode tmode = insn_data[icode].operand[0].mode;
14961 machine_mode mode0 = Pmode;
14962 machine_mode mode1 = Pmode;
14963 rtx op0 = expand_normal (arg0);
14964 rtx op1 = expand_normal (arg1);
14966 if (icode == CODE_FOR_nothing)
14967 /* Builtin not supported on this processor. */
14968 return 0;
14970 /* If we got invalid arguments, bail out before generating bad rtl. */
14971 if (arg0 == error_mark_node || arg1 == error_mark_node)
14972 return const0_rtx;
14974 if (target == 0
14975 || GET_MODE (target) != tmode
14976 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14977 target = gen_reg_rtx (tmode);
14979 op1 = copy_to_mode_reg (mode1, op1);
14981 if (op0 == const0_rtx)
14983 addr = gen_rtx_MEM (tmode, op1);
14985 else
14987 op0 = copy_to_mode_reg (mode0, op0);
14988 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14991 pat = GEN_FCN (icode) (target, addr);
14993 if (! pat)
14994 return 0;
14995 emit_insn (pat);
14997 return target;
15000 /* Return a constant vector for use as a little-endian permute control vector
15001 to reverse the order of elements of the given vector mode. */
15002 static rtx
15003 swap_selector_for_mode (machine_mode mode)
15005 /* These are little endian vectors, so their elements are reversed
15006 from what you would normally expect for a permute control vector. */
15007 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
15008 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
15009 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
15010 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
15011 unsigned int *swaparray, i;
15012 rtx perm[16];
15014 switch (mode)
15016 case V2DFmode:
15017 case V2DImode:
15018 swaparray = swap2;
15019 break;
15020 case V4SFmode:
15021 case V4SImode:
15022 swaparray = swap4;
15023 break;
15024 case V8HImode:
15025 swaparray = swap8;
15026 break;
15027 case V16QImode:
15028 swaparray = swap16;
15029 break;
15030 default:
15031 gcc_unreachable ();
15034 for (i = 0; i < 16; ++i)
15035 perm[i] = GEN_INT (swaparray[i]);
15037 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
15040 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
15041 with -maltivec=be specified. Issue the load followed by an element-
15042 reversing permute. */
15043 void
15044 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15046 rtx tmp = gen_reg_rtx (mode);
15047 rtx load = gen_rtx_SET (tmp, op1);
15048 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
15049 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
15050 rtx sel = swap_selector_for_mode (mode);
15051 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
15053 gcc_assert (REG_P (op0));
15054 emit_insn (par);
15055 emit_insn (gen_rtx_SET (op0, vperm));
15058 /* Generate code for a "stvxl" built-in for a little endian target with
15059 -maltivec=be specified. Issue the store preceded by an element-reversing
15060 permute. */
15061 void
15062 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15064 rtx tmp = gen_reg_rtx (mode);
15065 rtx store = gen_rtx_SET (op0, tmp);
15066 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
15067 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
15068 rtx sel = swap_selector_for_mode (mode);
15069 rtx vperm;
15071 gcc_assert (REG_P (op1));
15072 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
15073 emit_insn (gen_rtx_SET (tmp, vperm));
15074 emit_insn (par);
15077 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
15078 specified. Issue the store preceded by an element-reversing permute. */
15079 void
15080 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
15082 machine_mode inner_mode = GET_MODE_INNER (mode);
15083 rtx tmp = gen_reg_rtx (mode);
15084 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
15085 rtx sel = swap_selector_for_mode (mode);
15086 rtx vperm;
15088 gcc_assert (REG_P (op1));
15089 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
15090 emit_insn (gen_rtx_SET (tmp, vperm));
15091 emit_insn (gen_rtx_SET (op0, stvx));
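15093 /* Expand an AltiVec load builtin; BLK selects a BLKmode memory reference. */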
15094 static rtx
15095 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
15097 rtx pat, addr;
15098 tree arg0 = CALL_EXPR_ARG (exp, 0);
15099 tree arg1 = CALL_EXPR_ARG (exp, 1);
15100 machine_mode tmode = insn_data[icode].operand[0].mode;
15101 machine_mode mode0 = Pmode;
15102 machine_mode mode1 = Pmode;
15103 rtx op0 = expand_normal (arg0);
15104 rtx op1 = expand_normal (arg1);
15106 if (icode == CODE_FOR_nothing)
15107 /* Builtin not supported on this processor. */
15108 return 0;
15110 /* If we got invalid arguments, bail out before generating bad rtl. */
15111 if (arg0 == error_mark_node || arg1 == error_mark_node)
15112 return const0_rtx;
15114 if (target == 0
15115 || GET_MODE (target) != tmode
15116 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15117 target = gen_reg_rtx (tmode);
15119 op1 = copy_to_mode_reg (mode1, op1);
15121 /* For LVX, express the RTL accurately by ANDing the address with -16.
15122 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
15123 so the raw address is fine. */
15124 if (icode == CODE_FOR_altivec_lvx_v2df_2op
15125 || icode == CODE_FOR_altivec_lvx_v2di_2op
15126 || icode == CODE_FOR_altivec_lvx_v4sf_2op
15127 || icode == CODE_FOR_altivec_lvx_v4si_2op
15128 || icode == CODE_FOR_altivec_lvx_v8hi_2op
15129 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
15131 rtx rawaddr;
15132 if (op0 == const0_rtx)
15133 rawaddr = op1;
15134 else
15136 op0 = copy_to_mode_reg (mode0, op0);
15137 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
15139 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
15140 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
15142 /* For -maltivec=be, emit the load and follow it up with a
15143 permute to swap the elements. */
15144 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
15146 rtx temp = gen_reg_rtx (tmode);
15147 emit_insn (gen_rtx_SET (temp, addr));
15149 rtx sel = swap_selector_for_mode (tmode);
15150 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
15151 UNSPEC_VPERM);
15152 emit_insn (gen_rtx_SET (target, vperm));
15154 else
15155 emit_insn (gen_rtx_SET (target, addr));
15157 else
15159 if (op0 == const0_rtx)
15160 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
15161 else
15163 op0 = copy_to_mode_reg (mode0, op0);
15164 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
15165 gen_rtx_PLUS (Pmode, op1, op0));
15168 pat = GEN_FCN (icode) (target, addr);
15169 if (! pat)
15170 return 0;
15171 emit_insn (pat);
15174 return target;
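15176 /* Expand an SPE store builtin. */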
15177 static rtx
15178 spe_expand_stv_builtin (enum insn_code icode, tree exp)
15180 tree arg0 = CALL_EXPR_ARG (exp, 0);
15181 tree arg1 = CALL_EXPR_ARG (exp, 1);
15182 tree arg2 = CALL_EXPR_ARG (exp, 2);
15183 rtx op0 = expand_normal (arg0);
15184 rtx op1 = expand_normal (arg1);
15185 rtx op2 = expand_normal (arg2);
15186 rtx pat;
15187 machine_mode mode0 = insn_data[icode].operand[0].mode;
15188 machine_mode mode1 = insn_data[icode].operand[1].mode;
15189 machine_mode mode2 = insn_data[icode].operand[2].mode;
15191 /* Invalid arguments. Bail out before generating bad rtl. */
15192 if (arg0 == error_mark_node
15193 || arg1 == error_mark_node
15194 || arg2 == error_mark_node)
15195 return const0_rtx;
15197 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
15198 op0 = copy_to_mode_reg (mode2, op0);
15199 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
15200 op1 = copy_to_mode_reg (mode0, op1);
15201 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15202 op2 = copy_to_mode_reg (mode1, op2);
15204 pat = GEN_FCN (icode) (op1, op2, op0);
15205 if (pat)
15206 emit_insn (pat);
15207 return NULL_RTX;
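15208 /* Expand a PAIRED store builtin; the value in arg 0 is stored at
15209 arg 2, offset by arg 1 when arg 1 is nonzero. */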
15210 static rtx
15211 paired_expand_stv_builtin (enum insn_code icode, tree exp)
15213 tree arg0 = CALL_EXPR_ARG (exp, 0);
15214 tree arg1 = CALL_EXPR_ARG (exp, 1);
15215 tree arg2 = CALL_EXPR_ARG (exp, 2);
15216 rtx op0 = expand_normal (arg0);
15217 rtx op1 = expand_normal (arg1);
15218 rtx op2 = expand_normal (arg2);
15219 rtx pat, addr;
15220 machine_mode tmode = insn_data[icode].operand[0].mode;
15221 machine_mode mode1 = Pmode;
15222 machine_mode mode2 = Pmode;
15224 /* Invalid arguments. Bail out before generating bad rtl. */
15225 if (arg0 == error_mark_node
15226 || arg1 == error_mark_node
15227 || arg2 == error_mark_node)
15228 return const0_rtx;
15230 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
15231 op0 = copy_to_mode_reg (tmode, op0);
15233 op2 = copy_to_mode_reg (mode2, op2);
15235 if (op1 == const0_rtx)
15237 addr = gen_rtx_MEM (tmode, op2);
15239 else
15241 op1 = copy_to_mode_reg (mode1, op1);
15242 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
15245 pat = GEN_FCN (icode) (addr, op0);
15246 if (pat)
15247 emit_insn (pat);
15248 return NULL_RTX;
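15249 /* Expand the stxvl builtin, which stores a vector of variable length:
15250 the operands are the source vector, the address, and the length. */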
15251 static rtx
15252 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
15254 rtx pat;
15255 tree arg0 = CALL_EXPR_ARG (exp, 0);
15256 tree arg1 = CALL_EXPR_ARG (exp, 1);
15257 tree arg2 = CALL_EXPR_ARG (exp, 2);
15258 rtx op0 = expand_normal (arg0);
15259 rtx op1 = expand_normal (arg1);
15260 rtx op2 = expand_normal (arg2);
15261 machine_mode mode0 = insn_data[icode].operand[0].mode;
15262 machine_mode mode1 = insn_data[icode].operand[1].mode;
15263 machine_mode mode2 = insn_data[icode].operand[2].mode;
15265 if (icode == CODE_FOR_nothing)
15266 /* Builtin not supported on this processor. */
15267 return NULL_RTX;
15269 /* If we got invalid arguments, bail out before generating bad rtl. */
15270 if (arg0 == error_mark_node
15271 || arg1 == error_mark_node
15272 || arg2 == error_mark_node)
15273 return NULL_RTX;
15275 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15276 op0 = copy_to_mode_reg (mode0, op0);
15277 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15278 op1 = copy_to_mode_reg (mode1, op1);
15279 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15280 op2 = copy_to_mode_reg (mode2, op2);
15282 pat = GEN_FCN (icode) (op0, op1, op2);
15283 if (pat)
15284 emit_insn (pat);
15286 return NULL_RTX;
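15287 /* Expand an AltiVec store builtin; the value in arg 0 is stored at
15288 arg 2, offset by arg 1 when arg 1 is nonzero. */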
15289 static rtx
15290 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
15292 tree arg0 = CALL_EXPR_ARG (exp, 0);
15293 tree arg1 = CALL_EXPR_ARG (exp, 1);
15294 tree arg2 = CALL_EXPR_ARG (exp, 2);
15295 rtx op0 = expand_normal (arg0);
15296 rtx op1 = expand_normal (arg1);
15297 rtx op2 = expand_normal (arg2);
15298 rtx pat, addr, rawaddr;
15299 machine_mode tmode = insn_data[icode].operand[0].mode;
15300 machine_mode smode = insn_data[icode].operand[1].mode;
15301 machine_mode mode1 = Pmode;
15302 machine_mode mode2 = Pmode;
15304 /* Invalid arguments. Bail out before generating bad rtl. */
15305 if (arg0 == error_mark_node
15306 || arg1 == error_mark_node
15307 || arg2 == error_mark_node)
15308 return const0_rtx;
15310 op2 = copy_to_mode_reg (mode2, op2);
15312 /* For STVX, express the RTL accurately by ANDing the address with -16.
15313 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
15314 so the raw address is fine. */
15315 if (icode == CODE_FOR_altivec_stvx_v2df_2op
15316 || icode == CODE_FOR_altivec_stvx_v2di_2op
15317 || icode == CODE_FOR_altivec_stvx_v4sf_2op
15318 || icode == CODE_FOR_altivec_stvx_v4si_2op
15319 || icode == CODE_FOR_altivec_stvx_v8hi_2op
15320 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
15322 if (op1 == const0_rtx)
15323 rawaddr = op2;
15324 else
15326 op1 = copy_to_mode_reg (mode1, op1);
15327 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
15330 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
15331 addr = gen_rtx_MEM (tmode, addr);
15333 op0 = copy_to_mode_reg (tmode, op0);
15335 /* For -maltivec=be, emit a permute to swap the elements, followed
15336 by the store. */
15337 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
15339 rtx temp = gen_reg_rtx (tmode);
15340 rtx sel = swap_selector_for_mode (tmode);
15341 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
15342 UNSPEC_VPERM);
15343 emit_insn (gen_rtx_SET (temp, vperm));
15344 emit_insn (gen_rtx_SET (addr, temp));
15346 else
15347 emit_insn (gen_rtx_SET (addr, op0));
15349 else
15351 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
15352 op0 = copy_to_mode_reg (smode, op0);
15354 if (op1 == const0_rtx)
15355 addr = gen_rtx_MEM (tmode, op2);
15356 else
15358 op1 = copy_to_mode_reg (mode1, op1);
15359 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
15362 pat = GEN_FCN (icode) (addr, op0);
15363 if (pat)
15364 emit_insn (pat);
15367 return NULL_RTX;
15370 /* Return the appropriate SPR number associated with the given builtin. */
15371 static inline HOST_WIDE_INT
15372 htm_spr_num (enum rs6000_builtins code)
15374 if (code == HTM_BUILTIN_GET_TFHAR
15375 || code == HTM_BUILTIN_SET_TFHAR)
15376 return TFHAR_SPR;
15377 else if (code == HTM_BUILTIN_GET_TFIAR
15378 || code == HTM_BUILTIN_SET_TFIAR)
15379 return TFIAR_SPR;
15380 else if (code == HTM_BUILTIN_GET_TEXASR
15381 || code == HTM_BUILTIN_SET_TEXASR)
15382 return TEXASR_SPR;
15383 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
15384 || code == HTM_BUILTIN_SET_TEXASRU);
15385 return TEXASRU_SPR;
15388 /* Return the appropriate SPR regno associated with the given builtin. */
15389 static inline HOST_WIDE_INT
15390 htm_spr_regno (enum rs6000_builtins code)
15392 if (code == HTM_BUILTIN_GET_TFHAR
15393 || code == HTM_BUILTIN_SET_TFHAR)
15394 return TFHAR_REGNO;
15395 else if (code == HTM_BUILTIN_GET_TFIAR
15396 || code == HTM_BUILTIN_SET_TFIAR)
15397 return TFIAR_REGNO;
15398 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
15399 || code == HTM_BUILTIN_SET_TEXASR
15400 || code == HTM_BUILTIN_GET_TEXASRU
15401 || code == HTM_BUILTIN_SET_TEXASRU);
15402 return TEXASR_REGNO;
15405 /* Return the correct ICODE value depending on whether we are
15406 setting or reading the HTM SPRs. */
15407 static inline enum insn_code
15408 rs6000_htm_spr_icode (bool nonvoid)
15410 if (nonvoid)
15411 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
15412 else
15413 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
15416 /* Expand the HTM builtin in EXP and store the result in TARGET.
15417 Store true in *EXPANDEDP if we found a builtin to expand. */
15418 static rtx
15419 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
15421 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15422 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
15423 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15424 const struct builtin_description *d;
15425 size_t i;
15427 *expandedp = true;
15429 if (!TARGET_POWERPC64
15430 && (fcode == HTM_BUILTIN_TABORTDC
15431 || fcode == HTM_BUILTIN_TABORTDCI))
15433 size_t uns_fcode = (size_t)fcode;
15434 const char *name = rs6000_builtin_info[uns_fcode].name;
15435 error ("builtin %s is only valid in 64-bit mode", name);
15436 return const0_rtx;
15439 /* Expand the HTM builtins. */
15440 d = bdesc_htm;
15441 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
15442 if (d->code == fcode)
15444 rtx op[MAX_HTM_OPERANDS], pat;
15445 int nopnds = 0;
15446 tree arg;
15447 call_expr_arg_iterator iter;
15448 unsigned attr = rs6000_builtin_info[fcode].attr;
15449 enum insn_code icode = d->icode;
15450 const struct insn_operand_data *insn_op;
15451 bool uses_spr = (attr & RS6000_BTC_SPR);
15452 rtx cr = NULL_RTX;
15454 if (uses_spr)
15455 icode = rs6000_htm_spr_icode (nonvoid);
15456 insn_op = &insn_data[icode].operand[0];
15458 if (nonvoid)
15460 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
15461 if (!target
15462 || GET_MODE (target) != tmode
15463 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
15464 target = gen_reg_rtx (tmode);
15465 if (uses_spr)
15466 op[nopnds++] = target;
15469 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
15471 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
15472 return const0_rtx;
15474 insn_op = &insn_data[icode].operand[nopnds];
15476 op[nopnds] = expand_normal (arg);
15478 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
15480 if (!strcmp (insn_op->constraint, "n"))
15482 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
15483 if (!CONST_INT_P (op[nopnds]))
15484 error ("argument %d must be an unsigned literal", arg_num);
15485 else
15486 error ("argument %d is an unsigned literal that is "
15487 "out of range", arg_num);
15488 return const0_rtx;
15490 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
15493 nopnds++;
15496 /* Handle the builtins for extended mnemonics. These accept
15497 no arguments, but map to builtins that take arguments. */
15498 switch (fcode)
15500 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
15501 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
15502 op[nopnds++] = GEN_INT (1);
15503 if (flag_checking)
15504 attr |= RS6000_BTC_UNARY;
15505 break;
15506 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
15507 op[nopnds++] = GEN_INT (0);
15508 if (flag_checking)
15509 attr |= RS6000_BTC_UNARY;
15510 break;
15511 default:
15512 break;
15515 /* If this builtin accesses SPRs, then pass in the appropriate
15516 SPR number and SPR regno as the last two operands. */
15517 if (uses_spr)
15519 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
15520 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
15521 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
15523 /* If this builtin accesses a CR, then pass in a scratch
15524 CR as the last operand. */
15525 else if (attr & RS6000_BTC_CR)
15526 { cr = gen_reg_rtx (CCmode);
15527 op[nopnds++] = cr;
15530 if (flag_checking)
15532 int expected_nopnds = 0;
15533 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
15534 expected_nopnds = 1;
15535 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
15536 expected_nopnds = 2;
15537 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
15538 expected_nopnds = 3;
15539 if (!(attr & RS6000_BTC_VOID))
15540 expected_nopnds += 1;
15541 if (uses_spr)
15542 expected_nopnds += 2;
15544 gcc_assert (nopnds == expected_nopnds
15545 && nopnds <= MAX_HTM_OPERANDS);
15548 switch (nopnds)
15550 case 1:
15551 pat = GEN_FCN (icode) (op[0]);
15552 break;
15553 case 2:
15554 pat = GEN_FCN (icode) (op[0], op[1]);
15555 break;
15556 case 3:
15557 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
15558 break;
15559 case 4:
15560 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
15561 break;
15562 default:
15563 gcc_unreachable ();
15565 if (!pat)
15566 return NULL_RTX;
15567 emit_insn (pat);
15569 if (attr & RS6000_BTC_CR)
15571 if (fcode == HTM_BUILTIN_TBEGIN)
15573 /* Emit code to set TARGET to true or false depending on
15574 whether the tbegin. instruction succeeded or failed
15575 to start a transaction. We do this by placing the 1's
15576 complement of CR's EQ bit into TARGET. */
15577 rtx scratch = gen_reg_rtx (SImode);
15578 emit_insn (gen_rtx_SET (scratch,
15579 gen_rtx_EQ (SImode, cr,
15580 const0_rtx)));
15581 emit_insn (gen_rtx_SET (target,
15582 gen_rtx_XOR (SImode, scratch,
15583 GEN_INT (1))));
15585 else
15587 /* Emit code to copy the 4-bit condition register field
15588 CR into the least significant end of register TARGET. */
15589 rtx scratch1 = gen_reg_rtx (SImode);
15590 rtx scratch2 = gen_reg_rtx (SImode);
15591 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
15592 emit_insn (gen_movcc (subreg, cr));
15593 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
15594 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
15598 if (nonvoid)
15599 return target;
15600 return const0_rtx;
15603 *expandedp = false;
15604 return NULL_RTX;
15607 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
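15608 /* Examples: __builtin_cpu_is ("power9"), __builtin_cpu_supports ("vsx"). */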
15609 static rtx
15610 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
15611 rtx target)
15613 /* __builtin_cpu_init () is a nop, so expand to nothing. */
15614 if (fcode == RS6000_BUILTIN_CPU_INIT)
15615 return const0_rtx;
15617 if (target == 0 || GET_MODE (target) != SImode)
15618 target = gen_reg_rtx (SImode);
15620 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
15621 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
15622 if (TREE_CODE (arg) != STRING_CST)
15624 error ("builtin %s only accepts a string argument",
15625 rs6000_builtin_info[(size_t) fcode].name);
15626 return const0_rtx;
15629 if (fcode == RS6000_BUILTIN_CPU_IS)
15631 const char *cpu = TREE_STRING_POINTER (arg);
15632 rtx cpuid = NULL_RTX;
15633 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
15634 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
15636 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
15637 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
15638 break;
15640 if (cpuid == NULL_RTX)
15642 /* Invalid CPU argument. */
15643 error ("cpu %s is an invalid argument to builtin %s",
15644 cpu, rs6000_builtin_info[(size_t) fcode].name);
15645 return const0_rtx;
15648 rtx platform = gen_reg_rtx (SImode);
15649 rtx tcbmem = gen_const_mem (SImode,
15650 gen_rtx_PLUS (Pmode,
15651 gen_rtx_REG (Pmode, TLS_REGNUM),
15652 GEN_INT (TCB_PLATFORM_OFFSET)));
15653 emit_move_insn (platform, tcbmem);
15654 emit_insn (gen_eqsi3 (target, platform, cpuid));
15656 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
15658 const char *hwcap = TREE_STRING_POINTER (arg);
15659 rtx mask = NULL_RTX;
15660 int hwcap_offset;
15661 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
15662 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
15664 mask = GEN_INT (cpu_supports_info[i].mask);
15665 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
15666 break;
15668 if (mask == NULL_RTX)
15670 /* Invalid HWCAP argument. */
15671 error ("hwcap %s is an invalid argument to builtin %s",
15672 hwcap, rs6000_builtin_info[(size_t) fcode].name);
15673 return const0_rtx;
15676 rtx tcb_hwcap = gen_reg_rtx (SImode);
15677 rtx tcbmem = gen_const_mem (SImode,
15678 gen_rtx_PLUS (Pmode,
15679 gen_rtx_REG (Pmode, TLS_REGNUM),
15680 GEN_INT (hwcap_offset)));
15681 emit_move_insn (tcb_hwcap, tcbmem);
15682 rtx scratch1 = gen_reg_rtx (SImode);
15683 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
15684 rtx scratch2 = gen_reg_rtx (SImode);
15685 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15686 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15689 /* Record that we have expanded a CPU builtin, so that we can later
15690 emit a reference to the special symbol exported by LIBC to ensure we
15691 do not link against an old LIBC that doesn't support this feature. */
15692 cpu_builtin_p = true;
15694 #else
15695 /* For old LIBCs, always return FALSE. */
15696 emit_move_insn (target, GEN_INT (0));
15697 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15699 return target;
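15701 /* Expand an expression EXP that calls a builtin with three arguments. */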
15702 static rtx
15703 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15705 rtx pat;
15706 tree arg0 = CALL_EXPR_ARG (exp, 0);
15707 tree arg1 = CALL_EXPR_ARG (exp, 1);
15708 tree arg2 = CALL_EXPR_ARG (exp, 2);
15709 rtx op0 = expand_normal (arg0);
15710 rtx op1 = expand_normal (arg1);
15711 rtx op2 = expand_normal (arg2);
15712 machine_mode tmode = insn_data[icode].operand[0].mode;
15713 machine_mode mode0 = insn_data[icode].operand[1].mode;
15714 machine_mode mode1 = insn_data[icode].operand[2].mode;
15715 machine_mode mode2 = insn_data[icode].operand[3].mode;
15717 if (icode == CODE_FOR_nothing)
15718 /* Builtin not supported on this processor. */
15719 return 0;
15721 /* If we got invalid arguments, bail out before generating bad rtl. */
15722 if (arg0 == error_mark_node
15723 || arg1 == error_mark_node
15724 || arg2 == error_mark_node)
15725 return const0_rtx;
15727 /* Check and prepare argument depending on the instruction code.
15729 Note that a switch statement instead of the sequence of tests
15730 would be incorrect as many of the CODE_FOR values could be
15731 CODE_FOR_nothing and that would yield multiple alternatives
15732 with identical values. We'd never reach here at runtime in
15733 this case. */
15734 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15735 || icode == CODE_FOR_altivec_vsldoi_v2df
15736 || icode == CODE_FOR_altivec_vsldoi_v4si
15737 || icode == CODE_FOR_altivec_vsldoi_v8hi
15738 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15740 /* Only allow 4-bit unsigned literals. */
15741 STRIP_NOPS (arg2);
15742 if (TREE_CODE (arg2) != INTEGER_CST
15743 || TREE_INT_CST_LOW (arg2) & ~0xf)
15745 error ("argument 3 must be a 4-bit unsigned literal");
15746 return CONST0_RTX (tmode);
15749 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15750 || icode == CODE_FOR_vsx_xxpermdi_v2di
15751 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15752 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15753 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15754 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15755 || icode == CODE_FOR_vsx_xxpermdi_v4si
15756 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15757 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15758 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15759 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15760 || icode == CODE_FOR_vsx_xxsldwi_v4si
15761 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15762 || icode == CODE_FOR_vsx_xxsldwi_v2di
15763 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15765 /* Only allow 2-bit unsigned literals. */
15766 STRIP_NOPS (arg2);
15767 if (TREE_CODE (arg2) != INTEGER_CST
15768 || TREE_INT_CST_LOW (arg2) & ~0x3)
15770 error ("argument 3 must be a 2-bit unsigned literal");
15771 return CONST0_RTX (tmode);
15774 else if (icode == CODE_FOR_vsx_set_v2df
15775 || icode == CODE_FOR_vsx_set_v2di
15776 || icode == CODE_FOR_bcdadd
15777 || icode == CODE_FOR_bcdadd_lt
15778 || icode == CODE_FOR_bcdadd_eq
15779 || icode == CODE_FOR_bcdadd_gt
15780 || icode == CODE_FOR_bcdsub
15781 || icode == CODE_FOR_bcdsub_lt
15782 || icode == CODE_FOR_bcdsub_eq
15783 || icode == CODE_FOR_bcdsub_gt)
15785 /* Only allow 1-bit unsigned literals. */
15786 STRIP_NOPS (arg2);
15787 if (TREE_CODE (arg2) != INTEGER_CST
15788 || TREE_INT_CST_LOW (arg2) & ~0x1)
15790 error ("argument 3 must be a 1-bit unsigned literal");
15791 return CONST0_RTX (tmode);
15794 else if (icode == CODE_FOR_dfp_ddedpd_dd
15795 || icode == CODE_FOR_dfp_ddedpd_td)
15797 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15798 STRIP_NOPS (arg0);
15799 if (TREE_CODE (arg0) != INTEGER_CST
15800 || TREE_INT_CST_LOW (arg0) & ~0x2)
15802 error ("argument 1 must be 0 or 2");
15803 return CONST0_RTX (tmode);
15806 else if (icode == CODE_FOR_dfp_denbcd_dd
15807 || icode == CODE_FOR_dfp_denbcd_td)
15809 /* Only allow 1-bit unsigned literals. */
15810 STRIP_NOPS (arg0);
15811 if (TREE_CODE (arg0) != INTEGER_CST
15812 || TREE_INT_CST_LOW (arg0) & ~0x1)
15814 error ("argument 1 must be a 1-bit unsigned literal");
15815 return CONST0_RTX (tmode);
15818 else if (icode == CODE_FOR_dfp_dscli_dd
15819 || icode == CODE_FOR_dfp_dscli_td
15820 || icode == CODE_FOR_dfp_dscri_dd
15821 || icode == CODE_FOR_dfp_dscri_td)
15823 /* Only allow 6-bit unsigned literals. */
15824 STRIP_NOPS (arg1);
15825 if (TREE_CODE (arg1) != INTEGER_CST
15826 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15828 error ("argument 2 must be a 6-bit unsigned literal");
15829 return CONST0_RTX (tmode);
15832 else if (icode == CODE_FOR_crypto_vshasigmaw
15833 || icode == CODE_FOR_crypto_vshasigmad)
15835 /* Check that the 2nd and 3rd arguments are integer constants in
15836 range, and prepare the arguments. */
15837 STRIP_NOPS (arg1);
15838 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15840 error ("argument 2 must be 0 or 1");
15841 return CONST0_RTX (tmode);
15844 STRIP_NOPS (arg2);
15845 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15847 error ("argument 3 must be in the range 0..15");
15848 return CONST0_RTX (tmode);
15852 if (target == 0
15853 || GET_MODE (target) != tmode
15854 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15855 target = gen_reg_rtx (tmode);
15857 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15858 op0 = copy_to_mode_reg (mode0, op0);
15859 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15860 op1 = copy_to_mode_reg (mode1, op1);
15861 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15862 op2 = copy_to_mode_reg (mode2, op2);
15864 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15865 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15866 else
15867 pat = GEN_FCN (icode) (target, op0, op1, op2);
15868 if (! pat)
15869 return 0;
15870 emit_insn (pat);
15872 return target;
15875 /* Expand the lvx builtins. */
15876 static rtx
15877 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15879 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15880 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15881 tree arg0;
15882 machine_mode tmode, mode0;
15883 rtx pat, op0;
15884 enum insn_code icode;
15886 switch (fcode)
15888 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15889 icode = CODE_FOR_vector_altivec_load_v16qi;
15890 break;
15891 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15892 icode = CODE_FOR_vector_altivec_load_v8hi;
15893 break;
15894 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15895 icode = CODE_FOR_vector_altivec_load_v4si;
15896 break;
15897 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15898 icode = CODE_FOR_vector_altivec_load_v4sf;
15899 break;
15900 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15901 icode = CODE_FOR_vector_altivec_load_v2df;
15902 break;
15903 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15904 icode = CODE_FOR_vector_altivec_load_v2di;
15905 break;
15906 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15907 icode = CODE_FOR_vector_altivec_load_v1ti;
15908 break;
15909 default:
15910 *expandedp = false;
15911 return NULL_RTX;
15914 *expandedp = true;
15916 arg0 = CALL_EXPR_ARG (exp, 0);
15917 op0 = expand_normal (arg0);
15918 tmode = insn_data[icode].operand[0].mode;
15919 mode0 = insn_data[icode].operand[1].mode;
15921 if (target == 0
15922 || GET_MODE (target) != tmode
15923 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15924 target = gen_reg_rtx (tmode);
15926 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15927 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15929 pat = GEN_FCN (icode) (target, op0);
15930 if (! pat)
15931 return 0;
15932 emit_insn (pat);
15933 return target;
15936 /* Expand the stvx builtins. */
15937 static rtx
15938 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15939 bool *expandedp)
15941 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15942 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15943 tree arg0, arg1;
15944 machine_mode mode0, mode1;
15945 rtx pat, op0, op1;
15946 enum insn_code icode;
15948 switch (fcode)
15950 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15951 icode = CODE_FOR_vector_altivec_store_v16qi;
15952 break;
15953 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15954 icode = CODE_FOR_vector_altivec_store_v8hi;
15955 break;
15956 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15957 icode = CODE_FOR_vector_altivec_store_v4si;
15958 break;
15959 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15960 icode = CODE_FOR_vector_altivec_store_v4sf;
15961 break;
15962 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15963 icode = CODE_FOR_vector_altivec_store_v2df;
15964 break;
15965 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15966 icode = CODE_FOR_vector_altivec_store_v2di;
15967 break;
15968 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15969 icode = CODE_FOR_vector_altivec_store_v1ti;
15970 break;
15971 default:
15972 *expandedp = false;
15973 return NULL_RTX;
15976 arg0 = CALL_EXPR_ARG (exp, 0);
15977 arg1 = CALL_EXPR_ARG (exp, 1);
15978 op0 = expand_normal (arg0);
15979 op1 = expand_normal (arg1);
15980 mode0 = insn_data[icode].operand[0].mode;
15981 mode1 = insn_data[icode].operand[1].mode;
15983 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15984 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15985 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15986 op1 = copy_to_mode_reg (mode1, op1);
15988 pat = GEN_FCN (icode) (op0, op1);
15989 if (pat)
15990 emit_insn (pat);
15992 *expandedp = true;
15993 return NULL_RTX;
15996 /* Expand the dst builtins. */
15997 static rtx
15998 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15999 bool *expandedp)
16001 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16002 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16003 tree arg0, arg1, arg2;
16004 machine_mode mode0, mode1;
16005 rtx pat, op0, op1, op2;
16006 const struct builtin_description *d;
16007 size_t i;
16009 *expandedp = false;
16011 /* Handle DST variants. */
16012 d = bdesc_dst;
16013 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16014 if (d->code == fcode)
16016 arg0 = CALL_EXPR_ARG (exp, 0);
16017 arg1 = CALL_EXPR_ARG (exp, 1);
16018 arg2 = CALL_EXPR_ARG (exp, 2);
16019 op0 = expand_normal (arg0);
16020 op1 = expand_normal (arg1);
16021 op2 = expand_normal (arg2);
16022 mode0 = insn_data[d->icode].operand[0].mode;
16023 mode1 = insn_data[d->icode].operand[1].mode;
16025 /* Invalid arguments; bail out before generating bad rtl. */
16026 if (arg0 == error_mark_node
16027 || arg1 == error_mark_node
16028 || arg2 == error_mark_node)
16029 return const0_rtx;
16031 *expandedp = true;
16032 STRIP_NOPS (arg2);
16033 if (TREE_CODE (arg2) != INTEGER_CST
16034 || TREE_INT_CST_LOW (arg2) & ~0x3)
16036 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
16037 return const0_rtx;
16040 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16041 op0 = copy_to_mode_reg (Pmode, op0);
16042 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16043 op1 = copy_to_mode_reg (mode1, op1);
16045 pat = GEN_FCN (d->icode) (op0, op1, op2);
16046 if (pat != 0)
16047 emit_insn (pat);
16049 return NULL_RTX;
16052 return NULL_RTX;
16055 /* Expand vec_init builtin. */
16056 static rtx
16057 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
16059 machine_mode tmode = TYPE_MODE (type);
16060 machine_mode inner_mode = GET_MODE_INNER (tmode);
16061 int i, n_elt = GET_MODE_NUNITS (tmode);
16063 gcc_assert (VECTOR_MODE_P (tmode));
16064 gcc_assert (n_elt == call_expr_nargs (exp));
16066 if (!target || !register_operand (target, tmode))
16067 target = gen_reg_rtx (tmode);
16069 /* If we have a vector consisting of a single element, such as V1TImode, do
16070 the initialization directly. */
16071 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
16073 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
16074 emit_move_insn (target, gen_lowpart (tmode, x));
16076 else
16078 rtvec v = rtvec_alloc (n_elt);
16080 for (i = 0; i < n_elt; ++i)
16082 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
16083 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
16086 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
16089 return target;
16092 /* Return the integer constant in ARG. Constrain it to be in the range
16093 of the subparts of VEC_TYPE; issue an error if not. */
16095 static int
16096 get_element_number (tree vec_type, tree arg)
16098 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16100 if (!tree_fits_uhwi_p (arg)
16101 || (elt = tree_to_uhwi (arg), elt > max))
16103 error ("selector must be an integer constant in the range 0..%wi", max);
16104 return 0;
16107 return elt;
16110 /* Expand vec_set builtin. */
16111 static rtx
16112 altivec_expand_vec_set_builtin (tree exp)
16114 machine_mode tmode, mode1;
16115 tree arg0, arg1, arg2;
16116 int elt;
16117 rtx op0, op1;
16119 arg0 = CALL_EXPR_ARG (exp, 0);
16120 arg1 = CALL_EXPR_ARG (exp, 1);
16121 arg2 = CALL_EXPR_ARG (exp, 2);
16123 tmode = TYPE_MODE (TREE_TYPE (arg0));
16124 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16125 gcc_assert (VECTOR_MODE_P (tmode));
16127 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
16128 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
16129 elt = get_element_number (TREE_TYPE (arg0), arg2);
16131 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
16132 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
16134 op0 = force_reg (tmode, op0);
16135 op1 = force_reg (mode1, op1);
16137 rs6000_expand_vector_set (op0, op1, elt);
16139 return op0;
16142 /* Expand vec_ext builtin. */
16143 static rtx
16144 altivec_expand_vec_ext_builtin (tree exp, rtx target)
16146 machine_mode tmode, mode0;
16147 tree arg0, arg1;
16148 rtx op0;
16149 rtx op1;
16151 arg0 = CALL_EXPR_ARG (exp, 0);
16152 arg1 = CALL_EXPR_ARG (exp, 1);
16154 op0 = expand_normal (arg0);
16155 op1 = expand_normal (arg1);
16157 /* Call get_element_number to validate arg1 if it is a constant. */
16158 if (TREE_CODE (arg1) == INTEGER_CST)
16159 (void) get_element_number (TREE_TYPE (arg0), arg1);
16161 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
16162 mode0 = TYPE_MODE (TREE_TYPE (arg0));
16163 gcc_assert (VECTOR_MODE_P (mode0));
16165 op0 = force_reg (mode0, op0);
16167 if (optimize || !target || !register_operand (target, tmode))
16168 target = gen_reg_rtx (tmode);
16170 rs6000_expand_vector_extract (target, op0, op1);
16172 return target;
16175 /* Expand the builtin in EXP and store the result in TARGET. Store
16176 true in *EXPANDEDP if we found a builtin to expand. */
16177 static rtx
16178 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
16180 const struct builtin_description *d;
16181 size_t i;
16182 enum insn_code icode;
16183 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16184 tree arg0, arg1, arg2;
16185 rtx op0, pat;
16186 machine_mode tmode, mode0;
16187 enum rs6000_builtins fcode
16188 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16190 if (rs6000_overloaded_builtin_p (fcode))
16192 *expandedp = true;
16193 error ("unresolved overload for Altivec builtin %qF", fndecl);
16195 /* Given it is invalid, just generate a normal call. */
16196 return expand_call (exp, target, false);
16199 target = altivec_expand_ld_builtin (exp, target, expandedp);
16200 if (*expandedp)
16201 return target;
16203 target = altivec_expand_st_builtin (exp, target, expandedp);
16204 if (*expandedp)
16205 return target;
16207 target = altivec_expand_dst_builtin (exp, target, expandedp);
16208 if (*expandedp)
16209 return target;
16211 *expandedp = true;
16213 switch (fcode)
16215 case ALTIVEC_BUILTIN_STVX_V2DF:
16216 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
16217 case ALTIVEC_BUILTIN_STVX_V2DI:
16218 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
16219 case ALTIVEC_BUILTIN_STVX_V4SF:
16220 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
16221 case ALTIVEC_BUILTIN_STVX:
16222 case ALTIVEC_BUILTIN_STVX_V4SI:
16223 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
16224 case ALTIVEC_BUILTIN_STVX_V8HI:
16225 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
16226 case ALTIVEC_BUILTIN_STVX_V16QI:
16227 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
16228 case ALTIVEC_BUILTIN_STVEBX:
16229 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
16230 case ALTIVEC_BUILTIN_STVEHX:
16231 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
16232 case ALTIVEC_BUILTIN_STVEWX:
16233 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
16234 case ALTIVEC_BUILTIN_STVXL_V2DF:
16235 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
16236 case ALTIVEC_BUILTIN_STVXL_V2DI:
16237 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
16238 case ALTIVEC_BUILTIN_STVXL_V4SF:
16239 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
16240 case ALTIVEC_BUILTIN_STVXL:
16241 case ALTIVEC_BUILTIN_STVXL_V4SI:
16242 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
16243 case ALTIVEC_BUILTIN_STVXL_V8HI:
16244 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
16245 case ALTIVEC_BUILTIN_STVXL_V16QI:
16246 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
16248 case ALTIVEC_BUILTIN_STVLX:
16249 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
16250 case ALTIVEC_BUILTIN_STVLXL:
16251 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
16252 case ALTIVEC_BUILTIN_STVRX:
16253 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
16254 case ALTIVEC_BUILTIN_STVRXL:
16255 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
16257 case P9V_BUILTIN_STXVL:
16258 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
16260 case VSX_BUILTIN_STXVD2X_V1TI:
16261 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
16262 case VSX_BUILTIN_STXVD2X_V2DF:
16263 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
16264 case VSX_BUILTIN_STXVD2X_V2DI:
16265 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
16266 case VSX_BUILTIN_STXVW4X_V4SF:
16267 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
16268 case VSX_BUILTIN_STXVW4X_V4SI:
16269 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
16270 case VSX_BUILTIN_STXVW4X_V8HI:
16271 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
16272 case VSX_BUILTIN_STXVW4X_V16QI:
16273 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
16275 /* For the following on big endian, it's ok to use any appropriate
16276 unaligned-supporting store, so use a generic expander. For
16277 little-endian, the exact element-reversing instruction must
16278 be used. */
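/* A background sketch, assuming stxvd2x-style semantics (not stated
   in this file): the generic vsx_store patterns number vector elements
   in big-endian order, so on little-endian targets the *_elemrev
   patterns supply the extra element swap needed for the in-memory
   layout to match the source-level element order.  */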
16279 case VSX_BUILTIN_ST_ELEMREV_V2DF:
16281 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
16282 : CODE_FOR_vsx_st_elemrev_v2df);
16283 return altivec_expand_stv_builtin (code, exp);
16285 case VSX_BUILTIN_ST_ELEMREV_V2DI:
16287 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
16288 : CODE_FOR_vsx_st_elemrev_v2di);
16289 return altivec_expand_stv_builtin (code, exp);
16291 case VSX_BUILTIN_ST_ELEMREV_V4SF:
16293 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
16294 : CODE_FOR_vsx_st_elemrev_v4sf);
16295 return altivec_expand_stv_builtin (code, exp);
16297 case VSX_BUILTIN_ST_ELEMREV_V4SI:
16299 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
16300 : CODE_FOR_vsx_st_elemrev_v4si);
16301 return altivec_expand_stv_builtin (code, exp);
16303 case VSX_BUILTIN_ST_ELEMREV_V8HI:
16305 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
16306 : CODE_FOR_vsx_st_elemrev_v8hi);
16307 return altivec_expand_stv_builtin (code, exp);
16309 case VSX_BUILTIN_ST_ELEMREV_V16QI:
16311 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
16312 : CODE_FOR_vsx_st_elemrev_v16qi);
16313 return altivec_expand_stv_builtin (code, exp);
16316 case ALTIVEC_BUILTIN_MFVSCR:
16317 icode = CODE_FOR_altivec_mfvscr;
16318 tmode = insn_data[icode].operand[0].mode;
16320 if (target == 0
16321 || GET_MODE (target) != tmode
16322 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16323 target = gen_reg_rtx (tmode);
16325 pat = GEN_FCN (icode) (target);
16326 if (! pat)
16327 return 0;
16328 emit_insn (pat);
16329 return target;
16331 case ALTIVEC_BUILTIN_MTVSCR:
16332 icode = CODE_FOR_altivec_mtvscr;
16333 arg0 = CALL_EXPR_ARG (exp, 0);
16334 op0 = expand_normal (arg0);
16335 mode0 = insn_data[icode].operand[0].mode;
16337 /* If we got invalid arguments, bail out before generating bad rtl. */
16338 if (arg0 == error_mark_node)
16339 return const0_rtx;
16341 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16342 op0 = copy_to_mode_reg (mode0, op0);
16344 pat = GEN_FCN (icode) (op0);
16345 if (pat)
16346 emit_insn (pat);
16347 return NULL_RTX;
16349 case ALTIVEC_BUILTIN_DSSALL:
16350 emit_insn (gen_altivec_dssall ());
16351 return NULL_RTX;
16353 case ALTIVEC_BUILTIN_DSS:
16354 icode = CODE_FOR_altivec_dss;
16355 arg0 = CALL_EXPR_ARG (exp, 0);
16356 STRIP_NOPS (arg0);
16357 op0 = expand_normal (arg0);
16358 mode0 = insn_data[icode].operand[0].mode;
16360 /* If we got invalid arguments, bail out before generating bad rtl. */
16361 if (arg0 == error_mark_node)
16362 return const0_rtx;
16364 if (TREE_CODE (arg0) != INTEGER_CST
16365 || TREE_INT_CST_LOW (arg0) & ~0x3)
16367 error ("argument to dss must be a 2-bit unsigned literal");
16368 return const0_rtx;
16371 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16372 op0 = copy_to_mode_reg (mode0, op0);
16374 emit_insn (gen_altivec_dss (op0));
16375 return NULL_RTX;
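/* Illustration (spelling follows the usual __builtin_altivec_ naming):
   the ~0x3 mask above accepts only the literals 0..3, so e.g.
   __builtin_altivec_dss (3) expands to the dss insn, while
   __builtin_altivec_dss (4) is rejected with the diagnostic above.  */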
16377 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
16378 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
16379 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
16380 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
16381 case VSX_BUILTIN_VEC_INIT_V2DF:
16382 case VSX_BUILTIN_VEC_INIT_V2DI:
16383 case VSX_BUILTIN_VEC_INIT_V1TI:
16384 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
16386 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
16387 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
16388 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
16389 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
16390 case VSX_BUILTIN_VEC_SET_V2DF:
16391 case VSX_BUILTIN_VEC_SET_V2DI:
16392 case VSX_BUILTIN_VEC_SET_V1TI:
16393 return altivec_expand_vec_set_builtin (exp);
16395 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
16396 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
16397 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
16398 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
16399 case VSX_BUILTIN_VEC_EXT_V2DF:
16400 case VSX_BUILTIN_VEC_EXT_V2DI:
16401 case VSX_BUILTIN_VEC_EXT_V1TI:
16402 return altivec_expand_vec_ext_builtin (exp, target);
16404 case P9V_BUILTIN_VEXTRACT4B:
16405 case P9V_BUILTIN_VEC_VEXTRACT4B:
16406 arg1 = CALL_EXPR_ARG (exp, 1);
16407 STRIP_NOPS (arg1);
16409 /* Generate a normal call if it is invalid. */
16410 if (arg1 == error_mark_node)
16411 return expand_call (exp, target, false);
16413 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
16415 error ("second argument to vec_vextract4b must be 0..12");
16416 return expand_call (exp, target, false);
16418 break;
16420 case P9V_BUILTIN_VINSERT4B:
16421 case P9V_BUILTIN_VINSERT4B_DI:
16422 case P9V_BUILTIN_VEC_VINSERT4B:
16423 arg2 = CALL_EXPR_ARG (exp, 2);
16424 STRIP_NOPS (arg2);
16426 /* Generate a normal call if it is invalid. */
16427 if (arg2 == error_mark_node)
16428 return expand_call (exp, target, false);
16430 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
16432 error ("third argument to vec_vinsert4b must be 0..12");
16433 return expand_call (exp, target, false);
16435 break;
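/* Illustration: both vextract4b and vinsert4b demand a compile-time
   byte offset in 0..12; any other value is diagnosed above and the
   builtin is expanded as an ordinary call instead.  */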
16437 default:
16438 break;
16439 /* Fall through to the table-driven expanders below. */
16442 /* Expand abs* operations. */
16443 d = bdesc_abs;
16444 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16445 if (d->code == fcode)
16446 return altivec_expand_abs_builtin (d->icode, exp, target);
16448 /* Expand the AltiVec predicates. */
16449 d = bdesc_altivec_preds;
16450 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16451 if (d->code == fcode)
16452 return altivec_expand_predicate_builtin (d->icode, exp, target);
16454 /* LV* are funky; we initialize them differently. */
16455 switch (fcode)
16457 case ALTIVEC_BUILTIN_LVSL:
16458 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
16459 exp, target, false);
16460 case ALTIVEC_BUILTIN_LVSR:
16461 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
16462 exp, target, false);
16463 case ALTIVEC_BUILTIN_LVEBX:
16464 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
16465 exp, target, false);
16466 case ALTIVEC_BUILTIN_LVEHX:
16467 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
16468 exp, target, false);
16469 case ALTIVEC_BUILTIN_LVEWX:
16470 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
16471 exp, target, false);
16472 case ALTIVEC_BUILTIN_LVXL_V2DF:
16473 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
16474 exp, target, false);
16475 case ALTIVEC_BUILTIN_LVXL_V2DI:
16476 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
16477 exp, target, false);
16478 case ALTIVEC_BUILTIN_LVXL_V4SF:
16479 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
16480 exp, target, false);
16481 case ALTIVEC_BUILTIN_LVXL:
16482 case ALTIVEC_BUILTIN_LVXL_V4SI:
16483 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
16484 exp, target, false);
16485 case ALTIVEC_BUILTIN_LVXL_V8HI:
16486 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
16487 exp, target, false);
16488 case ALTIVEC_BUILTIN_LVXL_V16QI:
16489 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
16490 exp, target, false);
16491 case ALTIVEC_BUILTIN_LVX_V2DF:
16492 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
16493 exp, target, false);
16494 case ALTIVEC_BUILTIN_LVX_V2DI:
16495 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
16496 exp, target, false);
16497 case ALTIVEC_BUILTIN_LVX_V4SF:
16498 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
16499 exp, target, false);
16500 case ALTIVEC_BUILTIN_LVX:
16501 case ALTIVEC_BUILTIN_LVX_V4SI:
16502 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
16503 exp, target, false);
16504 case ALTIVEC_BUILTIN_LVX_V8HI:
16505 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
16506 exp, target, false);
16507 case ALTIVEC_BUILTIN_LVX_V16QI:
16508 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
16509 exp, target, false);
16510 case ALTIVEC_BUILTIN_LVLX:
16511 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
16512 exp, target, true);
16513 case ALTIVEC_BUILTIN_LVLXL:
16514 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
16515 exp, target, true);
16516 case ALTIVEC_BUILTIN_LVRX:
16517 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
16518 exp, target, true);
16519 case ALTIVEC_BUILTIN_LVRXL:
16520 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
16521 exp, target, true);
16522 case VSX_BUILTIN_LXVD2X_V1TI:
16523 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
16524 exp, target, false);
16525 case VSX_BUILTIN_LXVD2X_V2DF:
16526 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
16527 exp, target, false);
16528 case VSX_BUILTIN_LXVD2X_V2DI:
16529 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
16530 exp, target, false);
16531 case VSX_BUILTIN_LXVW4X_V4SF:
16532 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
16533 exp, target, false);
16534 case VSX_BUILTIN_LXVW4X_V4SI:
16535 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
16536 exp, target, false);
16537 case VSX_BUILTIN_LXVW4X_V8HI:
16538 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
16539 exp, target, false);
16540 case VSX_BUILTIN_LXVW4X_V16QI:
16541 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
16542 exp, target, false);
16543 /* For the following on big endian, it's ok to use any appropriate
16544 unaligned-supporting load, so use a generic expander. For
16545 little-endian, the exact element-reversing instruction must
16546 be used. */
16547 case VSX_BUILTIN_LD_ELEMREV_V2DF:
16549 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
16550 : CODE_FOR_vsx_ld_elemrev_v2df);
16551 return altivec_expand_lv_builtin (code, exp, target, false);
16553 case VSX_BUILTIN_LD_ELEMREV_V2DI:
16555 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
16556 : CODE_FOR_vsx_ld_elemrev_v2di);
16557 return altivec_expand_lv_builtin (code, exp, target, false);
16559 case VSX_BUILTIN_LD_ELEMREV_V4SF:
16561 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
16562 : CODE_FOR_vsx_ld_elemrev_v4sf);
16563 return altivec_expand_lv_builtin (code, exp, target, false);
16565 case VSX_BUILTIN_LD_ELEMREV_V4SI:
16567 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
16568 : CODE_FOR_vsx_ld_elemrev_v4si);
16569 return altivec_expand_lv_builtin (code, exp, target, false);
16571 case VSX_BUILTIN_LD_ELEMREV_V8HI:
16573 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
16574 : CODE_FOR_vsx_ld_elemrev_v8hi);
16575 return altivec_expand_lv_builtin (code, exp, target, false);
16577 case VSX_BUILTIN_LD_ELEMREV_V16QI:
16579 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
16580 : CODE_FOR_vsx_ld_elemrev_v16qi);
16581 return altivec_expand_lv_builtin (code, exp, target, false);
16583 break;
16584 default:
16585 break;
16586 /* Fall through; no AltiVec builtin matched. */
16589 *expandedp = false;
16590 return NULL_RTX;
16593 /* Expand the builtin in EXP and store the result in TARGET. Store
16594 true in *EXPANDEDP if we found a builtin to expand. */
16595 static rtx
16596 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
16598 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16599 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16600 const struct builtin_description *d;
16601 size_t i;
16603 *expandedp = true;
16605 switch (fcode)
16607 case PAIRED_BUILTIN_STX:
16608 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
16609 case PAIRED_BUILTIN_LX:
16610 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
16611 default:
16612 break;
16613 /* Fall through to the paired predicate expanders below. */
16616 /* Expand the paired predicates. */
16617 d = bdesc_paired_preds;
16618 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
16619 if (d->code == fcode)
16620 return paired_expand_predicate_builtin (d->icode, exp, target);
16622 *expandedp = false;
16623 return NULL_RTX;
16626 /* Binops that need to be initialized manually, but can be expanded
16627 automagically by rs6000_expand_binop_builtin. */
16628 static const struct builtin_description bdesc_2arg_spe[] =
16630 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
16631 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
16632 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
16633 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
16634 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
16635 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
16636 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
16637 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
16638 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
16639 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
16640 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
16641 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
16642 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
16643 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
16644 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
16645 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
16646 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
16647 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
16648 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
16649 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
16650 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
16651 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
16654 /* Expand the builtin in EXP and store the result in TARGET. Store
16655 true in *EXPANDEDP if we found a builtin to expand.
16657 This expands the SPE builtins that are not simple unary and binary
16658 operations. */
16659 static rtx
16660 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
16662 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16663 tree arg1, arg0;
16664 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16665 enum insn_code icode;
16666 machine_mode tmode, mode0;
16667 rtx pat, op0;
16668 const struct builtin_description *d;
16669 size_t i;
16671 *expandedp = true;
16673 /* Syntax check for a 5-bit unsigned immediate. */
16674 switch (fcode)
16676 case SPE_BUILTIN_EVSTDD:
16677 case SPE_BUILTIN_EVSTDH:
16678 case SPE_BUILTIN_EVSTDW:
16679 case SPE_BUILTIN_EVSTWHE:
16680 case SPE_BUILTIN_EVSTWHO:
16681 case SPE_BUILTIN_EVSTWWE:
16682 case SPE_BUILTIN_EVSTWWO:
16683 arg1 = CALL_EXPR_ARG (exp, 2);
16684 if (TREE_CODE (arg1) != INTEGER_CST
16685 || TREE_INT_CST_LOW (arg1) & ~0x1f)
16687 error ("argument 2 must be a 5-bit unsigned literal");
16688 return const0_rtx;
16690 break;
16691 default:
16692 break;
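/* Illustration: for these SPE store builtins the checked argument is
   an offset literal that must fit in 5 bits (0..31); a constant such
   as 32 trips the ~0x1f mask above and is diagnosed.  */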
16695 /* The evsplat*i instructions are not quite generic. */
16696 switch (fcode)
16698 case SPE_BUILTIN_EVSPLATFI:
16699 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
16700 exp, target);
16701 case SPE_BUILTIN_EVSPLATI:
16702 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
16703 exp, target);
16704 default:
16705 break;
16708 d = bdesc_2arg_spe;
16709 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
16710 if (d->code == fcode)
16711 return rs6000_expand_binop_builtin (d->icode, exp, target);
16713 d = bdesc_spe_predicates;
16714 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
16715 if (d->code == fcode)
16716 return spe_expand_predicate_builtin (d->icode, exp, target);
16718 d = bdesc_spe_evsel;
16719 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
16720 if (d->code == fcode)
16721 return spe_expand_evsel_builtin (d->icode, exp, target);
16723 switch (fcode)
16725 case SPE_BUILTIN_EVSTDDX:
16726 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
16727 case SPE_BUILTIN_EVSTDHX:
16728 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
16729 case SPE_BUILTIN_EVSTDWX:
16730 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
16731 case SPE_BUILTIN_EVSTWHEX:
16732 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
16733 case SPE_BUILTIN_EVSTWHOX:
16734 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
16735 case SPE_BUILTIN_EVSTWWEX:
16736 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
16737 case SPE_BUILTIN_EVSTWWOX:
16738 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
16739 case SPE_BUILTIN_EVSTDD:
16740 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
16741 case SPE_BUILTIN_EVSTDH:
16742 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
16743 case SPE_BUILTIN_EVSTDW:
16744 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
16745 case SPE_BUILTIN_EVSTWHE:
16746 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
16747 case SPE_BUILTIN_EVSTWHO:
16748 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
16749 case SPE_BUILTIN_EVSTWWE:
16750 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
16751 case SPE_BUILTIN_EVSTWWO:
16752 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
16753 case SPE_BUILTIN_MFSPEFSCR:
16754 icode = CODE_FOR_spe_mfspefscr;
16755 tmode = insn_data[icode].operand[0].mode;
16757 if (target == 0
16758 || GET_MODE (target) != tmode
16759 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16760 target = gen_reg_rtx (tmode);
16762 pat = GEN_FCN (icode) (target);
16763 if (! pat)
16764 return 0;
16765 emit_insn (pat);
16766 return target;
16767 case SPE_BUILTIN_MTSPEFSCR:
16768 icode = CODE_FOR_spe_mtspefscr;
16769 arg0 = CALL_EXPR_ARG (exp, 0);
16770 op0 = expand_normal (arg0);
16771 mode0 = insn_data[icode].operand[0].mode;
16773 if (arg0 == error_mark_node)
16774 return const0_rtx;
16776 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16777 op0 = copy_to_mode_reg (mode0, op0);
16779 pat = GEN_FCN (icode) (op0);
16780 if (pat)
16781 emit_insn (pat);
16782 return NULL_RTX;
16783 default:
16784 break;
16787 *expandedp = false;
16788 return NULL_RTX;
16791 static rtx
16792 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16794 rtx pat, scratch, tmp;
16795 tree form = CALL_EXPR_ARG (exp, 0);
16796 tree arg0 = CALL_EXPR_ARG (exp, 1);
16797 tree arg1 = CALL_EXPR_ARG (exp, 2);
16798 rtx op0 = expand_normal (arg0);
16799 rtx op1 = expand_normal (arg1);
16800 machine_mode mode0 = insn_data[icode].operand[1].mode;
16801 machine_mode mode1 = insn_data[icode].operand[2].mode;
16802 int form_int;
16803 enum rtx_code code;
16805 if (TREE_CODE (form) != INTEGER_CST)
16807 error ("argument 1 of __builtin_paired_predicate must be a constant");
16808 return const0_rtx;
16810 else
16811 form_int = TREE_INT_CST_LOW (form);
16813 gcc_assert (mode0 == mode1);
16815 if (arg0 == error_mark_node || arg1 == error_mark_node)
16816 return const0_rtx;
16818 if (target == 0
16819 || GET_MODE (target) != SImode
16820 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16821 target = gen_reg_rtx (SImode);
16822 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16823 op0 = copy_to_mode_reg (mode0, op0);
16824 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16825 op1 = copy_to_mode_reg (mode1, op1);
16827 scratch = gen_reg_rtx (CCFPmode);
16829 pat = GEN_FCN (icode) (scratch, op0, op1);
16830 if (!pat)
16831 return const0_rtx;
16833 emit_insn (pat);
16835 switch (form_int)
16837 /* LT bit. */
16838 case 0:
16839 code = LT;
16840 break;
16841 /* GT bit. */
16842 case 1:
16843 code = GT;
16844 break;
16845 /* EQ bit. */
16846 case 2:
16847 code = EQ;
16848 break;
16849 /* UN bit. */
16850 case 3:
16851 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16852 return target;
16853 default:
16854 error ("argument 1 of __builtin_paired_predicate is out of range");
16855 return const0_rtx;
16858 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16859 emit_move_insn (target, tmp);
16860 return target;
16863 static rtx
16864 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16866 rtx pat, scratch, tmp;
16867 tree form = CALL_EXPR_ARG (exp, 0);
16868 tree arg0 = CALL_EXPR_ARG (exp, 1);
16869 tree arg1 = CALL_EXPR_ARG (exp, 2);
16870 rtx op0 = expand_normal (arg0);
16871 rtx op1 = expand_normal (arg1);
16872 machine_mode mode0 = insn_data[icode].operand[1].mode;
16873 machine_mode mode1 = insn_data[icode].operand[2].mode;
16874 int form_int;
16875 enum rtx_code code;
16877 if (TREE_CODE (form) != INTEGER_CST)
16879 error ("argument 1 of __builtin_spe_predicate must be a constant");
16880 return const0_rtx;
16882 else
16883 form_int = TREE_INT_CST_LOW (form);
16885 gcc_assert (mode0 == mode1);
16887 if (arg0 == error_mark_node || arg1 == error_mark_node)
16888 return const0_rtx;
16890 if (target == 0
16891 || GET_MODE (target) != SImode
16892 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
16893 target = gen_reg_rtx (SImode);
16895 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16896 op0 = copy_to_mode_reg (mode0, op0);
16897 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
16898 op1 = copy_to_mode_reg (mode1, op1);
16900 scratch = gen_reg_rtx (CCmode);
16902 pat = GEN_FCN (icode) (scratch, op0, op1);
16903 if (! pat)
16904 return const0_rtx;
16905 emit_insn (pat);
16907 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
16908 _lower_. We use one compare, but look in different bits of the
16909 CR for each variant.
16911 There are 2 elements in each SPE simd type (upper/lower). The CR
16912 bits are set as follows:
16914 BIT0 | BIT 1 | BIT 2 | BIT 3
16915 U | L | (U | L) | (U & L)
16917 So, for an "all" relationship, BIT 3 would be set.
16918 For an "any" relationship, BIT 2 would be set. Etc.
16920 Following traditional nomenclature, these bits map to:
16922 BIT0 | BIT 1 | BIT 2 | BIT 3
16923 LT | GT | EQ | OV
16925 Later, we will generate rtl to read the OV/EQ/LT/GT bit for the all/any/upper/lower variant, respectively.
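/* Worked example: an "all" comparison needs BIT 3 (U & L), which the
   table above maps onto the OV position; hence the switch below reads
   OV for form 0 (all), EQ for form 1 (any), LT for form 2 (upper),
   and GT for form 3 (lower).  */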
16928 switch (form_int)
16930 /* All variant. OV bit. */
16931 case 0:
16932 /* We need to get to the OV bit, which is the ORDERED bit. We
16933 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
16934 that's ugly and will make validate_condition_mode die.
16935 So let's just use another pattern. */
16936 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16937 return target;
16938 /* Any variant. EQ bit. */
16939 case 1:
16940 code = EQ;
16941 break;
16942 /* Upper variant. LT bit. */
16943 case 2:
16944 code = LT;
16945 break;
16946 /* Lower variant. GT bit. */
16947 case 3:
16948 code = GT;
16949 break;
16950 default:
16951 error ("argument 1 of __builtin_spe_predicate is out of range");
16952 return const0_rtx;
16955 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16956 emit_move_insn (target, tmp);
16958 return target;
16961 /* The evsel builtins look like this:
16963 e = __builtin_spe_evsel_OP (a, b, c, d);
16965 and work like this:
16967 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
16968 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
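/* Usage sketch (assumed spelling of one variant): with a signed
   greater-than compare,

     e = __builtin_spe_evsel_gts (a, b, c, d);

   picks c[upper] when a[upper] > b[upper] and d[upper] otherwise, and
   independently does the same for the lower element.  */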
16971 static rtx
16972 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
16974 rtx pat, scratch;
16975 tree arg0 = CALL_EXPR_ARG (exp, 0);
16976 tree arg1 = CALL_EXPR_ARG (exp, 1);
16977 tree arg2 = CALL_EXPR_ARG (exp, 2);
16978 tree arg3 = CALL_EXPR_ARG (exp, 3);
16979 rtx op0 = expand_normal (arg0);
16980 rtx op1 = expand_normal (arg1);
16981 rtx op2 = expand_normal (arg2);
16982 rtx op3 = expand_normal (arg3);
16983 machine_mode mode0 = insn_data[icode].operand[1].mode;
16984 machine_mode mode1 = insn_data[icode].operand[2].mode;
16986 gcc_assert (mode0 == mode1);
16988 if (arg0 == error_mark_node || arg1 == error_mark_node
16989 || arg2 == error_mark_node || arg3 == error_mark_node)
16990 return const0_rtx;
16992 if (target == 0
16993 || GET_MODE (target) != mode0
16994 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
16995 target = gen_reg_rtx (mode0);
16997 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16998 op0 = copy_to_mode_reg (mode0, op0);
16999 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17000 op1 = copy_to_mode_reg (mode0, op1);
17001 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
17002 op2 = copy_to_mode_reg (mode0, op2);
17003 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
17004 op3 = copy_to_mode_reg (mode0, op3);
17006 /* Generate the compare. */
17007 scratch = gen_reg_rtx (CCmode);
17008 pat = GEN_FCN (icode) (scratch, op0, op1);
17009 if (! pat)
17010 return const0_rtx;
17011 emit_insn (pat);
17013 if (mode0 == V2SImode)
17014 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
17015 else
17016 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
17018 return target;
17021 /* Raise an error message for a builtin function that is called without the
17022 appropriate target options being set. */
17024 static void
17025 rs6000_invalid_builtin (enum rs6000_builtins fncode)
17027 size_t uns_fncode = (size_t) fncode;
17028 const char *name = rs6000_builtin_info[uns_fncode].name;
17029 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
17031 gcc_assert (name != NULL);
17032 if ((fnmask & RS6000_BTM_CELL) != 0)
17033 error ("Builtin function %s is only valid for the cell processor", name);
17034 else if ((fnmask & RS6000_BTM_VSX) != 0)
17035 error ("Builtin function %s requires the -mvsx option", name);
17036 else if ((fnmask & RS6000_BTM_HTM) != 0)
17037 error ("Builtin function %s requires the -mhtm option", name);
17038 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
17039 error ("Builtin function %s requires the -maltivec option", name);
17040 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
17041 error ("Builtin function %s requires the -mpaired option", name);
17042 else if ((fnmask & RS6000_BTM_SPE) != 0)
17043 error ("Builtin function %s requires the -mspe option", name);
17044 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
17045 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
17046 error ("Builtin function %s requires the -mhard-dfp and"
17047 " -mpower8-vector options", name);
17048 else if ((fnmask & RS6000_BTM_DFP) != 0)
17049 error ("Builtin function %s requires the -mhard-dfp option", name);
17050 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
17051 error ("Builtin function %s requires the -mpower8-vector option", name);
17052 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
17053 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
17054 error ("Builtin function %s requires the -mcpu=power9 and"
17055 " -m64 options", name);
17056 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
17057 error ("Builtin function %s requires the -mcpu=power9 option", name);
17058 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
17059 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
17060 error ("Builtin function %s requires the -mcpu=power9 and"
17061 " -m64 options", name);
17062 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
17063 error ("Builtin function %s requires the -mcpu=power9 option", name);
17064 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
17065 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
17066 error ("Builtin function %s requires the -mhard-float and"
17067 " -mlong-double-128 options", name);
17068 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
17069 error ("Builtin function %s requires the -mhard-float option", name);
17070 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
17071 error ("Builtin function %s requires the -mfloat128 option", name);
17072 else
17073 error ("Builtin function %s is not supported with the current options",
17074 name);
17077 /* Target hook for early folding of built-ins, shamelessly stolen
17078 from ia64.c. */
17080 static tree
17081 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
17082 tree *args, bool ignore ATTRIBUTE_UNUSED)
17084 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
17086 enum rs6000_builtins fn_code
17087 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
17088 switch (fn_code)
17090 case RS6000_BUILTIN_NANQ:
17091 case RS6000_BUILTIN_NANSQ:
17093 tree type = TREE_TYPE (TREE_TYPE (fndecl));
17094 const char *str = c_getstr (*args);
17095 int quiet = fn_code == RS6000_BUILTIN_NANQ;
17096 REAL_VALUE_TYPE real;
17098 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
17099 return build_real (type, real);
17100 return NULL_TREE;
17102 case RS6000_BUILTIN_INFQ:
17103 case RS6000_BUILTIN_HUGE_VALQ:
17105 tree type = TREE_TYPE (TREE_TYPE (fndecl));
17106 REAL_VALUE_TYPE inf;
17107 real_inf (&inf);
17108 return build_real (type, inf);
17110 default:
17111 break;
17114 #ifdef SUBTARGET_FOLD_BUILTIN
17115 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
17116 #else
17117 return NULL_TREE;
17118 #endif
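/* Folding sketch: given the cases above, __builtin_infq () folds at
   compile time to a __float128 infinity constant, and
   __builtin_nanq ("") folds to a quiet NaN whenever real_nan accepts
   the payload string; returning NULL_TREE leaves the call for the
   expander instead.  */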
17121 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
17122 a constant, use rs6000_fold_builtin.) */
17124 bool
17125 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
17127 gimple *stmt = gsi_stmt (*gsi);
17128 tree fndecl = gimple_call_fndecl (stmt);
17129 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
17130 enum rs6000_builtins fn_code
17131 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
17132 tree arg0, arg1, lhs;
17134 switch (fn_code)
17136 /* Flavors of vec_add. We deliberately don't expand
17137 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
17138 TImode, resulting in much poorer code generation. */
17139 case ALTIVEC_BUILTIN_VADDUBM:
17140 case ALTIVEC_BUILTIN_VADDUHM:
17141 case ALTIVEC_BUILTIN_VADDUWM:
17142 case P8V_BUILTIN_VADDUDM:
17143 case ALTIVEC_BUILTIN_VADDFP:
17144 case VSX_BUILTIN_XVADDDP:
17146 arg0 = gimple_call_arg (stmt, 0);
17147 arg1 = gimple_call_arg (stmt, 1);
17148 lhs = gimple_call_lhs (stmt);
17149 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
17150 gimple_set_location (g, gimple_location (stmt));
17151 gsi_replace (gsi, g, true);
17152 return true;
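/* Illustration: after this fold a call like vz = vec_add (vx, vy)
   becomes the plain GIMPLE assignment vz = vx + vy (PLUS_EXPR), so
   later passes see an ordinary vector addition.  */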
17154 /* Flavors of vec_sub. We deliberately don't expand
17155 P8V_BUILTIN_VSUBUQM. */
17156 case ALTIVEC_BUILTIN_VSUBUBM:
17157 case ALTIVEC_BUILTIN_VSUBUHM:
17158 case ALTIVEC_BUILTIN_VSUBUWM:
17159 case P8V_BUILTIN_VSUBUDM:
17160 case ALTIVEC_BUILTIN_VSUBFP:
17161 case VSX_BUILTIN_XVSUBDP:
17163 arg0 = gimple_call_arg (stmt, 0);
17164 arg1 = gimple_call_arg (stmt, 1);
17165 lhs = gimple_call_lhs (stmt);
17166 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
17167 gimple_set_location (g, gimple_location (stmt));
17168 gsi_replace (gsi, g, true);
17169 return true;
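/* Flavors of vec_mul (single and double precision float). */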
17171 case VSX_BUILTIN_XVMULSP:
17172 case VSX_BUILTIN_XVMULDP:
17174 arg0 = gimple_call_arg (stmt, 0);
17175 arg1 = gimple_call_arg (stmt, 1);
17176 lhs = gimple_call_lhs (stmt);
17177 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
17178 gimple_set_location (g, gimple_location (stmt));
17179 gsi_replace (gsi, g, true);
17180 return true;
17182 /* Even element flavors of vec_mul (signed). */
17183 case ALTIVEC_BUILTIN_VMULESB:
17184 case ALTIVEC_BUILTIN_VMULESH:
17185 /* Even element flavors of vec_mul (unsigned). */
17186 case ALTIVEC_BUILTIN_VMULEUB:
17187 case ALTIVEC_BUILTIN_VMULEUH:
17189 arg0 = gimple_call_arg (stmt, 0);
17190 arg1 = gimple_call_arg (stmt, 1);
17191 lhs = gimple_call_lhs (stmt);
17192 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
17193 gimple_set_location (g, gimple_location (stmt));
17194 gsi_replace (gsi, g, true);
17195 return true;
17197 /* Odd element flavors of vec_mul (signed). */
17198 case ALTIVEC_BUILTIN_VMULOSB:
17199 case ALTIVEC_BUILTIN_VMULOSH:
17200 /* Odd element flavors of vec_mul (unsigned). */
17201 case ALTIVEC_BUILTIN_VMULOUB:
17202 case ALTIVEC_BUILTIN_VMULOUH:
17204 arg0 = gimple_call_arg (stmt, 0);
17205 arg1 = gimple_call_arg (stmt, 1);
17206 lhs = gimple_call_lhs (stmt);
17207 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
17208 gimple_set_location (g, gimple_location (stmt));
17209 gsi_replace (gsi, g, true);
17210 return true;
17212 /* Flavors of vec_div (integer). */
17213 case VSX_BUILTIN_DIV_V2DI:
17214 case VSX_BUILTIN_UDIV_V2DI:
17216 arg0 = gimple_call_arg (stmt, 0);
17217 arg1 = gimple_call_arg (stmt, 1);
17218 lhs = gimple_call_lhs (stmt);
17219 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
17220 gimple_set_location (g, gimple_location (stmt));
17221 gsi_replace (gsi, g, true);
17222 return true;
17224 /* Flavors of vec_div (float). */
17225 case VSX_BUILTIN_XVDIVSP:
17226 case VSX_BUILTIN_XVDIVDP:
17228 arg0 = gimple_call_arg (stmt, 0);
17229 arg1 = gimple_call_arg (stmt, 1);
17230 lhs = gimple_call_lhs (stmt);
17231 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
17232 gimple_set_location (g, gimple_location (stmt));
17233 gsi_replace (gsi, g, true);
17234 return true;
17236 /* Flavors of vec_and. */
17237 case ALTIVEC_BUILTIN_VAND:
17239 arg0 = gimple_call_arg (stmt, 0);
17240 arg1 = gimple_call_arg (stmt, 1);
17241 lhs = gimple_call_lhs (stmt);
17242 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
17243 gimple_set_location (g, gimple_location (stmt));
17244 gsi_replace (gsi, g, true);
17245 return true;
17247 /* Flavors of vec_andc. */
17248 case ALTIVEC_BUILTIN_VANDC:
17250 arg0 = gimple_call_arg (stmt, 0);
17251 arg1 = gimple_call_arg (stmt, 1);
17252 lhs = gimple_call_lhs (stmt);
17253 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17254 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
17255 gimple_set_location (g, gimple_location (stmt));
17256 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17257 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
17258 gimple_set_location (g, gimple_location (stmt));
17259 gsi_replace (gsi, g, true);
17260 return true;
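/* Illustration: vec_andc (a, b) is emitted as two statements,
   temp = ~b then lhs = a & temp, since GIMPLE has no single
   and-with-complement operation.  */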
17262 /* Flavors of vec_nand. */
17263 case P8V_BUILTIN_VEC_NAND:
17264 case P8V_BUILTIN_NAND_V16QI:
17265 case P8V_BUILTIN_NAND_V8HI:
17266 case P8V_BUILTIN_NAND_V4SI:
17267 case P8V_BUILTIN_NAND_V4SF:
17268 case P8V_BUILTIN_NAND_V2DF:
17269 case P8V_BUILTIN_NAND_V2DI:
17271 arg0 = gimple_call_arg (stmt, 0);
17272 arg1 = gimple_call_arg (stmt, 1);
17273 lhs = gimple_call_lhs (stmt);
17274 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17275 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
17276 gimple_set_location (g, gimple_location (stmt));
17277 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17278 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
17279 gimple_set_location (g, gimple_location (stmt));
17280 gsi_replace (gsi, g, true);
17281 return true;
17283 /* Flavors of vec_or. */
17284 case ALTIVEC_BUILTIN_VOR:
17286 arg0 = gimple_call_arg (stmt, 0);
17287 arg1 = gimple_call_arg (stmt, 1);
17288 lhs = gimple_call_lhs (stmt);
17289 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
17290 gimple_set_location (g, gimple_location (stmt));
17291 gsi_replace (gsi, g, true);
17292 return true;
17294 /* Flavors of vec_orc. */
17295 case P8V_BUILTIN_ORC_V16QI:
17296 case P8V_BUILTIN_ORC_V8HI:
17297 case P8V_BUILTIN_ORC_V4SI:
17298 case P8V_BUILTIN_ORC_V4SF:
17299 case P8V_BUILTIN_ORC_V2DF:
17300 case P8V_BUILTIN_ORC_V2DI:
17302 arg0 = gimple_call_arg (stmt, 0);
17303 arg1 = gimple_call_arg (stmt, 1);
17304 lhs = gimple_call_lhs (stmt);
17305 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17306 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
17307 gimple_set_location (g, gimple_location (stmt));
17308 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17309 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
17310 gimple_set_location (g, gimple_location (stmt));
17311 gsi_replace (gsi, g, true);
17312 return true;
17314 /* Flavors of vec_xor. */
17315 case ALTIVEC_BUILTIN_VXOR:
17317 arg0 = gimple_call_arg (stmt, 0);
17318 arg1 = gimple_call_arg (stmt, 1);
17319 lhs = gimple_call_lhs (stmt);
17320 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
17321 gimple_set_location (g, gimple_location (stmt));
17322 gsi_replace (gsi, g, true);
17323 return true;
17325 /* Flavors of vec_nor. */
17326 case ALTIVEC_BUILTIN_VNOR:
17328 arg0 = gimple_call_arg (stmt, 0);
17329 arg1 = gimple_call_arg (stmt, 1);
17330 lhs = gimple_call_lhs (stmt);
17331 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
17332 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
17333 gimple_set_location (g, gimple_location (stmt));
17334 gsi_insert_before (gsi, g, GSI_SAME_STMT);
17335 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
17336 gimple_set_location (g, gimple_location (stmt));
17337 gsi_replace (gsi, g, true);
17338 return true;
17340 default:
17341 break;
17344 return false;
17347 /* Expand an expression EXP that calls a built-in function,
17348 with result going to TARGET if that's convenient
17349 (and in mode MODE if that's convenient).
17350 SUBTARGET may be used as the target for computing one of EXP's operands.
17351 IGNORE is nonzero if the value is to be ignored. */
17353 static rtx
17354 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17355 machine_mode mode ATTRIBUTE_UNUSED,
17356 int ignore ATTRIBUTE_UNUSED)
17358 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
17359 enum rs6000_builtins fcode
17360 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
17361 size_t uns_fcode = (size_t) fcode;
17362 const struct builtin_description *d;
17363 size_t i;
17364 rtx ret;
17365 bool success;
17366 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
17367 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
17369 if (TARGET_DEBUG_BUILTIN)
17371 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
17372 const char *name1 = rs6000_builtin_info[uns_fcode].name;
17373 const char *name2 = ((icode != CODE_FOR_nothing)
17374 ? get_insn_name ((int) icode)
17375 : "nothing");
17376 const char *name3;
17378 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
17380 default: name3 = "unknown"; break;
17381 case RS6000_BTC_SPECIAL: name3 = "special"; break;
17382 case RS6000_BTC_UNARY: name3 = "unary"; break;
17383 case RS6000_BTC_BINARY: name3 = "binary"; break;
17384 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
17385 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
17386 case RS6000_BTC_ABS: name3 = "abs"; break;
17387 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
17388 case RS6000_BTC_DST: name3 = "dst"; break;
17392 fprintf (stderr,
17393 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
17394 (name1) ? name1 : "---", fcode,
17395 (name2) ? name2 : "---", (int) icode,
17396 name3,
17397 func_valid_p ? "" : ", not valid");
17400 if (!func_valid_p)
17402 rs6000_invalid_builtin (fcode);
17404 /* Given it is invalid, just generate a normal call. */
17405 return expand_call (exp, target, ignore);
17408 switch (fcode)
17410 case RS6000_BUILTIN_RECIP:
17411 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
17413 case RS6000_BUILTIN_RECIPF:
17414 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
17416 case RS6000_BUILTIN_RSQRTF:
17417 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
17419 case RS6000_BUILTIN_RSQRT:
17420 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
17422 case POWER7_BUILTIN_BPERMD:
17423 return rs6000_expand_binop_builtin (((TARGET_64BIT)
17424 ? CODE_FOR_bpermd_di
17425 : CODE_FOR_bpermd_si), exp, target);
17427 case RS6000_BUILTIN_GET_TB:
17428 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
17429 target);
17431 case RS6000_BUILTIN_MFTB:
17432 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
17433 ? CODE_FOR_rs6000_mftb_di
17434 : CODE_FOR_rs6000_mftb_si),
17435 target);
17437 case RS6000_BUILTIN_MFFS:
17438 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
17440 case RS6000_BUILTIN_MTFSF:
17441 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
17443 case RS6000_BUILTIN_CPU_INIT:
17444 case RS6000_BUILTIN_CPU_IS:
17445 case RS6000_BUILTIN_CPU_SUPPORTS:
17446 return cpu_expand_builtin (fcode, exp, target);
17448 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
17449 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
17451 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
17452 : (int) CODE_FOR_altivec_lvsl_direct);
17453 machine_mode tmode = insn_data[icode].operand[0].mode;
17454 machine_mode mode = insn_data[icode].operand[1].mode;
17455 tree arg;
17456 rtx op, addr, pat;
17458 gcc_assert (TARGET_ALTIVEC);
17460 arg = CALL_EXPR_ARG (exp, 0);
17461 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
17462 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
17463 addr = memory_address (mode, op);
17464 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
17465 op = addr;
17466 else
17468 /* For the load case we need to negate the address. */
17469 op = gen_reg_rtx (GET_MODE (addr));
17470 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
17472 op = gen_rtx_MEM (mode, op);
17474 if (target == 0
17475 || GET_MODE (target) != tmode
17476 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17477 target = gen_reg_rtx (tmode);
17479 pat = GEN_FCN (icode) (target, op);
17480 if (!pat)
17481 return 0;
17482 emit_insn (pat);
17484 return target;
17487 case ALTIVEC_BUILTIN_VCFUX:
17488 case ALTIVEC_BUILTIN_VCFSX:
17489 case ALTIVEC_BUILTIN_VCTUXS:
17490 case ALTIVEC_BUILTIN_VCTSXS:
17491 /* FIXME: There's got to be a nicer way to handle this case than
17492 constructing a new CALL_EXPR. */
17493 if (call_expr_nargs (exp) == 1)
17495 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
17496 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
17498 break;
17500 default:
17501 break;
17504 if (TARGET_ALTIVEC)
17506 ret = altivec_expand_builtin (exp, target, &success);
17508 if (success)
17509 return ret;
17511 if (TARGET_SPE)
17513 ret = spe_expand_builtin (exp, target, &success);
17515 if (success)
17516 return ret;
17518 if (TARGET_PAIRED_FLOAT)
17520 ret = paired_expand_builtin (exp, target, &success);
17522 if (success)
17523 return ret;
17525 if (TARGET_HTM)
17527 ret = htm_expand_builtin (exp, target, &success);
17529 if (success)
17530 return ret;
17533 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
17534 /* RS6000_BTC_SPECIAL represents no-operand operators. */
17535 gcc_assert (attr == RS6000_BTC_UNARY
17536 || attr == RS6000_BTC_BINARY
17537 || attr == RS6000_BTC_TERNARY
17538 || attr == RS6000_BTC_SPECIAL);
17540 /* Handle simple unary operations. */
17541 d = bdesc_1arg;
17542 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17543 if (d->code == fcode)
17544 return rs6000_expand_unop_builtin (d->icode, exp, target);
17546 /* Handle simple binary operations. */
17547 d = bdesc_2arg;
17548 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17549 if (d->code == fcode)
17550 return rs6000_expand_binop_builtin (d->icode, exp, target);
17552 /* Handle simple ternary operations. */
17553 d = bdesc_3arg;
17554 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17555 if (d->code == fcode)
17556 return rs6000_expand_ternop_builtin (d->icode, exp, target);
17558 /* Handle simple no-argument operations. */
17559 d = bdesc_0arg;
17560 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17561 if (d->code == fcode)
17562 return rs6000_expand_zeroop_builtin (d->icode, target);
17564 gcc_unreachable ();
17567 /* Create a builtin vector type with a name, taking care not to give
17568 the canonical type a name. */
17570 static tree
17571 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
17573 tree result = build_vector_type (elt_type, num_elts);
17575 /* Copy so we don't give the canonical type a name. */
17576 result = build_variant_type_copy (result);
17578 add_builtin_type (name, result);
17580 return result;
17583 static void
17584 rs6000_init_builtins (void)
17586 tree tdecl;
17587 tree ftype;
17588 machine_mode mode;
17590 if (TARGET_DEBUG_BUILTIN)
17591 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
17592 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
17593 (TARGET_SPE) ? ", spe" : "",
17594 (TARGET_ALTIVEC) ? ", altivec" : "",
17595 (TARGET_VSX) ? ", vsx" : "");
17597 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17598 V2SF_type_node = build_vector_type (float_type_node, 2);
17599 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
17600 : "__vector long long",
17601 intDI_type_node, 2);
17602 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
17603 V4HI_type_node = build_vector_type (intHI_type_node, 4);
17604 V4SI_type_node = rs6000_vector_type ("__vector signed int",
17605 intSI_type_node, 4);
17606 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
17607 V8HI_type_node = rs6000_vector_type ("__vector signed short",
17608 intHI_type_node, 8);
17609 V16QI_type_node = rs6000_vector_type ("__vector signed char",
17610 intQI_type_node, 16);
17612 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
17613 unsigned_intQI_type_node, 16);
17614 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
17615 unsigned_intHI_type_node, 8);
17616 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
17617 unsigned_intSI_type_node, 4);
17618 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17619 ? "__vector unsigned long"
17620 : "__vector unsigned long long",
17621 unsigned_intDI_type_node, 2);
17623 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
17624 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
17625 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
17626 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
17628 const_str_type_node
17629 = build_pointer_type (build_qualified_type (char_type_node,
17630 TYPE_QUAL_CONST));
17632 /* We use V1TI mode as a special container to hold __int128_t items that
17633 must live in VSX registers. */
17634 if (intTI_type_node)
17636 V1TI_type_node = rs6000_vector_type ("__vector __int128",
17637 intTI_type_node, 1);
17638 unsigned_V1TI_type_node
17639 = rs6000_vector_type ("__vector unsigned __int128",
17640 unsigned_intTI_type_node, 1);
17643 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
17644 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
17645 'vector unsigned short'. */
17647 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
17648 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17649 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
17650 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
17651 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17653 long_integer_type_internal_node = long_integer_type_node;
17654 long_unsigned_type_internal_node = long_unsigned_type_node;
17655 long_long_integer_type_internal_node = long_long_integer_type_node;
17656 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
17657 intQI_type_internal_node = intQI_type_node;
17658 uintQI_type_internal_node = unsigned_intQI_type_node;
17659 intHI_type_internal_node = intHI_type_node;
17660 uintHI_type_internal_node = unsigned_intHI_type_node;
17661 intSI_type_internal_node = intSI_type_node;
17662 uintSI_type_internal_node = unsigned_intSI_type_node;
17663 intDI_type_internal_node = intDI_type_node;
17664 uintDI_type_internal_node = unsigned_intDI_type_node;
17665 intTI_type_internal_node = intTI_type_node;
17666 uintTI_type_internal_node = unsigned_intTI_type_node;
17667 float_type_internal_node = float_type_node;
17668 double_type_internal_node = double_type_node;
17669 long_double_type_internal_node = long_double_type_node;
17670 dfloat64_type_internal_node = dfloat64_type_node;
17671 dfloat128_type_internal_node = dfloat128_type_node;
17672 void_type_internal_node = void_type_node;
17674 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17675 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17676 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17677 format that uses a pair of doubles, depending on the switches and
17678 defaults.
17680 We do not enable the actual __float128 keyword unless the user explicitly
17681 asks for it, because the library support is not yet complete.
17683 If we don't support either the 128-bit IBM double-double format or IEEE
17684 128-bit floating point, we need to make sure the type is non-zero, or
17685 else the self-test fails during bootstrap.
17687 We don't register a built-in type for __ibm128 if the type is the same as
17688 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
17689 __ibm128 to long double. */
17690 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17692 ibm128_float_type_node = make_node (REAL_TYPE);
17693 TYPE_PRECISION (ibm128_float_type_node) = 128;
17694 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17695 layout_type (ibm128_float_type_node);
17697 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17698 "__ibm128");
17700 else
17701 ibm128_float_type_node = long_double_type_node;
17703 if (TARGET_FLOAT128_KEYWORD)
17705 ieee128_float_type_node = float128_type_node;
17706 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17707 "__float128");
17710 else if (TARGET_FLOAT128_TYPE)
17712 ieee128_float_type_node = make_node (REAL_TYPE);
17713 TYPE_PRECISION (ieee128_float_type_node) = 128;
17714 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
17715 layout_type (ieee128_float_type_node);
17717 /* If we are not exporting the __float128/_Float128 keywords, we need a
17718 keyword to get the types created. Use __ieee128 as the dummy
17719 keyword. */
17720 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17721 "__ieee128");
17724 else
17725 ieee128_float_type_node = long_double_type_node;
17727 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17728 tree type node. */
17729 builtin_mode_to_type[QImode][0] = integer_type_node;
17730 builtin_mode_to_type[HImode][0] = integer_type_node;
17731 builtin_mode_to_type[SImode][0] = intSI_type_node;
17732 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17733 builtin_mode_to_type[DImode][0] = intDI_type_node;
17734 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17735 builtin_mode_to_type[TImode][0] = intTI_type_node;
17736 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17737 builtin_mode_to_type[SFmode][0] = float_type_node;
17738 builtin_mode_to_type[DFmode][0] = double_type_node;
17739 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17740 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17741 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17742 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17743 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17744 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17745 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17746 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17747 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17748 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17749 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17750 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17751 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
17752 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17753 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17754 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17755 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17756 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17757 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17758 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
17760 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17761 TYPE_NAME (bool_char_type_node) = tdecl;
17763 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17764 TYPE_NAME (bool_short_type_node) = tdecl;
17766 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17767 TYPE_NAME (bool_int_type_node) = tdecl;
17769 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17770 TYPE_NAME (pixel_type_node) = tdecl;
17772 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17773 bool_char_type_node, 16);
17774 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17775 bool_short_type_node, 8);
17776 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17777 bool_int_type_node, 4);
17778 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17779 ? "__vector __bool long"
17780 : "__vector __bool long long",
17781 bool_long_type_node, 2);
17782 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17783 pixel_type_node, 8);
17785 /* Paired and SPE builtins are only available if you build a compiler with
17786 the appropriate options, so create those builtins only when the
17787 corresponding option is enabled. Create Altivec and VSX builtins on machines
17788 with at least the general purpose extensions (970 and newer) to allow the
17789 use of the target attribute. */
17790 if (TARGET_PAIRED_FLOAT)
17791 paired_init_builtins ();
17792 if (TARGET_SPE)
17793 spe_init_builtins ();
17794 if (TARGET_EXTRA_BUILTINS)
17795 altivec_init_builtins ();
17796 if (TARGET_HTM)
17797 htm_init_builtins ();
17799 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
17800 rs6000_common_init_builtins ();
17802 ftype = build_function_type_list (ieee128_float_type_node,
17803 const_str_type_node, NULL_TREE);
17804 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
17805 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
17807 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
17808 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17809 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17811 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17812 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17813 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17815 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17816 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17817 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17819 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17820 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17821 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17823 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17824 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17825 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17827 mode = (TARGET_64BIT) ? DImode : SImode;
17828 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17829 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17830 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17832 ftype = build_function_type_list (unsigned_intDI_type_node,
17833 NULL_TREE);
17834 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17836 if (TARGET_64BIT)
17837 ftype = build_function_type_list (unsigned_intDI_type_node,
17838 NULL_TREE);
17839 else
17840 ftype = build_function_type_list (unsigned_intSI_type_node,
17841 NULL_TREE);
17842 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17844 ftype = build_function_type_list (double_type_node, NULL_TREE);
17845 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17847 ftype = build_function_type_list (void_type_node,
17848 intSI_type_node, double_type_node,
17849 NULL_TREE);
17850 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17852 ftype = build_function_type_list (void_type_node, NULL_TREE);
17853 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17855 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17856 NULL_TREE);
17857 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17858 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17860 /* AIX libm provides clog as __clog. */
17861 if (TARGET_XCOFF
17862 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17863 set_user_assembler_name (tdecl, "__clog");
17865 #ifdef SUBTARGET_INIT_BUILTINS
17866 SUBTARGET_INIT_BUILTINS;
17867 #endif
17868 }
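A hedged usage sketch of the CPU-feature builtins registered just above: __builtin_cpu_init primes the hardware-capability data, and the two query builtins test it. The feature strings shown ("power9", "vsx") are from GCC's documented set; the exact set available depends on the glibc in use.

#include <stdio.h>

int
main (void)
{
  __builtin_cpu_init ();                /* RS6000_BUILTIN_CPU_INIT */
  if (__builtin_cpu_is ("power9"))      /* RS6000_BUILTIN_CPU_IS */
    puts ("running on a POWER9");
  if (__builtin_cpu_supports ("vsx"))   /* RS6000_BUILTIN_CPU_SUPPORTS */
    puts ("VSX is available");
  return 0;
}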
17870 /* Returns the rs6000 builtin decl for CODE. */
17872 static tree
17873 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17874 {
17875 HOST_WIDE_INT fnmask;
17877 if (code >= RS6000_BUILTIN_COUNT)
17878 return error_mark_node;
17880 fnmask = rs6000_builtin_info[code].mask;
17881 if ((fnmask & rs6000_builtin_mask) != fnmask)
17882 {
17883 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17884 return error_mark_node;
17885 }
17887 return rs6000_builtin_decls[code];
17888 }
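The mask test above is the same gate used throughout the init functions below: a builtin is valid only when every feature bit it requires is set in rs6000_builtin_mask. A minimal standalone sketch of that test, with invented feature bits standing in for the RS6000_BTM_* values:

#include <stdbool.h>
#include <stdint.h>

#define BTM_ALTIVEC (UINT64_C (1) << 0)   /* invented stand-ins for the */
#define BTM_VSX     (UINT64_C (1) << 1)   /* RS6000_BTM_* feature bits  */

static bool
builtin_enabled_p (uint64_t fnmask, uint64_t target_mask)
{
  /* Inverse of the test (fnmask & rs6000_builtin_mask) != fnmask:
     every required bit must be present in the target mask.  */
  return (fnmask & target_mask) == fnmask;
}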
17890 static void
17891 spe_init_builtins (void)
17892 {
17893 tree puint_type_node = build_pointer_type (unsigned_type_node);
17894 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
17895 const struct builtin_description *d;
17896 size_t i;
17897 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17899 tree v2si_ftype_4_v2si
17900 = build_function_type_list (opaque_V2SI_type_node,
17901 opaque_V2SI_type_node,
17902 opaque_V2SI_type_node,
17903 opaque_V2SI_type_node,
17904 opaque_V2SI_type_node,
17905 NULL_TREE);
17907 tree v2sf_ftype_4_v2sf
17908 = build_function_type_list (opaque_V2SF_type_node,
17909 opaque_V2SF_type_node,
17910 opaque_V2SF_type_node,
17911 opaque_V2SF_type_node,
17912 opaque_V2SF_type_node,
17913 NULL_TREE);
17915 tree int_ftype_int_v2si_v2si
17916 = build_function_type_list (integer_type_node,
17917 integer_type_node,
17918 opaque_V2SI_type_node,
17919 opaque_V2SI_type_node,
17920 NULL_TREE);
17922 tree int_ftype_int_v2sf_v2sf
17923 = build_function_type_list (integer_type_node,
17924 integer_type_node,
17925 opaque_V2SF_type_node,
17926 opaque_V2SF_type_node,
17927 NULL_TREE);
17929 tree void_ftype_v2si_puint_int
17930 = build_function_type_list (void_type_node,
17931 opaque_V2SI_type_node,
17932 puint_type_node,
17933 integer_type_node,
17934 NULL_TREE);
17936 tree void_ftype_v2si_puint_char
17937 = build_function_type_list (void_type_node,
17938 opaque_V2SI_type_node,
17939 puint_type_node,
17940 char_type_node,
17941 NULL_TREE);
17943 tree void_ftype_v2si_pv2si_int
17944 = build_function_type_list (void_type_node,
17945 opaque_V2SI_type_node,
17946 opaque_p_V2SI_type_node,
17947 integer_type_node,
17948 NULL_TREE);
17950 tree void_ftype_v2si_pv2si_char
17951 = build_function_type_list (void_type_node,
17952 opaque_V2SI_type_node,
17953 opaque_p_V2SI_type_node,
17954 char_type_node,
17955 NULL_TREE);
17957 tree void_ftype_int
17958 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17960 tree int_ftype_void
17961 = build_function_type_list (integer_type_node, NULL_TREE);
17963 tree v2si_ftype_pv2si_int
17964 = build_function_type_list (opaque_V2SI_type_node,
17965 opaque_p_V2SI_type_node,
17966 integer_type_node,
17967 NULL_TREE);
17969 tree v2si_ftype_puint_int
17970 = build_function_type_list (opaque_V2SI_type_node,
17971 puint_type_node,
17972 integer_type_node,
17973 NULL_TREE);
17975 tree v2si_ftype_pushort_int
17976 = build_function_type_list (opaque_V2SI_type_node,
17977 pushort_type_node,
17978 integer_type_node,
17979 NULL_TREE);
17981 tree v2si_ftype_signed_char
17982 = build_function_type_list (opaque_V2SI_type_node,
17983 signed_char_type_node,
17984 NULL_TREE);
17986 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
17988 /* Initialize irregular SPE builtins. */
17990 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
17991 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
17992 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
17993 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
17994 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
17995 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
17996 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
17997 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
17998 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
17999 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
18000 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
18001 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
18002 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
18003 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
18004 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
18005 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
18006 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
18007 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
18009 /* Loads. */
18010 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
18011 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
18012 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
18013 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
18014 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
18015 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
18016 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
18017 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
18018 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
18019 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
18020 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
18021 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
18022 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
18023 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
18024 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
18025 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
18026 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
18027 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
18028 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
18029 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
18030 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
18031 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
18033 /* Predicates. */
18034 d = bdesc_spe_predicates;
18035 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
18036 {
18037 tree type;
18038 HOST_WIDE_INT mask = d->mask;
18040 if ((mask & builtin_mask) != mask)
18041 {
18042 if (TARGET_DEBUG_BUILTIN)
18043 fprintf (stderr, "spe_init_builtins, skip predicate %s\n",
18044 d->name);
18045 continue;
18046 }
18048 /* Cannot define builtin if the instruction is disabled. */
18049 gcc_assert (d->icode != CODE_FOR_nothing);
18050 switch (insn_data[d->icode].operand[1].mode)
18051 {
18052 case V2SImode:
18053 type = int_ftype_int_v2si_v2si;
18054 break;
18055 case V2SFmode:
18056 type = int_ftype_int_v2sf_v2sf;
18057 break;
18058 default:
18059 gcc_unreachable ();
18060 }
18062 def_builtin (d->name, type, d->code);
18063 }
18065 /* Evsel predicates. */
18066 d = bdesc_spe_evsel;
18067 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
18068 {
18069 tree type;
18070 HOST_WIDE_INT mask = d->mask;
18072 if ((mask & builtin_mask) != mask)
18073 {
18074 if (TARGET_DEBUG_BUILTIN)
18075 fprintf (stderr, "spe_init_builtins, skip evsel %s\n",
18076 d->name);
18077 continue;
18078 }
18080 /* Cannot define builtin if the instruction is disabled. */
18081 gcc_assert (d->icode != CODE_FOR_nothing);
18082 switch (insn_data[d->icode].operand[1].mode)
18083 {
18084 case V2SImode:
18085 type = v2si_ftype_4_v2si;
18086 break;
18087 case V2SFmode:
18088 type = v2sf_ftype_4_v2sf;
18089 break;
18090 default:
18091 gcc_unreachable ();
18092 }
18094 def_builtin (d->name, type, d->code);
18095 }
18096 }
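For reference, a hedged sketch of how the SPE builtins defined above are used from C; it assumes a compiler configured for SPE (e.g. -mcpu=8548 -mspe) and relies only on names registered in this function:

/* Splat a small immediate into both 32-bit halves of an SPE vector.  */
__ev64_opaque__
splat_five (void)
{
  return __builtin_spe_evsplati (5);   /* SPE_BUILTIN_EVSPLATI */
}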
18098 static void
18099 paired_init_builtins (void)
18100 {
18101 const struct builtin_description *d;
18102 size_t i;
18103 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18105 tree int_ftype_int_v2sf_v2sf
18106 = build_function_type_list (integer_type_node,
18107 integer_type_node,
18108 V2SF_type_node,
18109 V2SF_type_node,
18110 NULL_TREE);
18111 tree pcfloat_type_node =
18112 build_pointer_type (build_qualified_type
18113 (float_type_node, TYPE_QUAL_CONST));
18115 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
18116 long_integer_type_node,
18117 pcfloat_type_node,
18118 NULL_TREE);
18119 tree void_ftype_v2sf_long_pcfloat =
18120 build_function_type_list (void_type_node,
18121 V2SF_type_node,
18122 long_integer_type_node,
18123 pcfloat_type_node,
18124 NULL_TREE);
18127 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
18128 PAIRED_BUILTIN_LX);
18131 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
18132 PAIRED_BUILTIN_STX);
18134 /* Predicates. */
18135 d = bdesc_paired_preds;
18136 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
18137 {
18138 tree type;
18139 HOST_WIDE_INT mask = d->mask;
18141 if ((mask & builtin_mask) != mask)
18142 {
18143 if (TARGET_DEBUG_BUILTIN)
18144 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
18145 d->name);
18146 continue;
18147 }
18149 /* Cannot define builtin if the instruction is disabled. */
18150 gcc_assert (d->icode != CODE_FOR_nothing);
18152 if (TARGET_DEBUG_BUILTIN)
18153 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
18154 (int)i, get_insn_name (d->icode), (int)d->icode,
18155 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
18157 switch (insn_data[d->icode].operand[1].mode)
18158 {
18159 case V2SFmode:
18160 type = int_ftype_int_v2sf_v2sf;
18161 break;
18162 default:
18163 gcc_unreachable ();
18164 }
18166 def_builtin (d->name, type, d->code);
18167 }
18168 }
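A hedged sketch of the two paired-single builtins defined above, assuming a paired-float target (-mpaired); the vector typedef is an assumption, standing in for the V2SF type the builtins traffic in. The argument order follows the type lists built in this function: (offset, pointer) for the load, (value, offset, pointer) for the store.

typedef float v2sf __attribute__ ((vector_size (8)));   /* assumed V2SF alias */

void
copy_pair (const float *src, float *dst)
{
  v2sf tmp = __builtin_paired_lx (0, src);   /* PAIRED_BUILTIN_LX  */
  __builtin_paired_stx (tmp, 0, dst);        /* PAIRED_BUILTIN_STX */
}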
18170 static void
18171 altivec_init_builtins (void)
18172 {
18173 const struct builtin_description *d;
18174 size_t i;
18175 tree ftype;
18176 tree decl;
18177 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18179 tree pvoid_type_node = build_pointer_type (void_type_node);
18181 tree pcvoid_type_node
18182 = build_pointer_type (build_qualified_type (void_type_node,
18183 TYPE_QUAL_CONST));
18185 tree int_ftype_opaque
18186 = build_function_type_list (integer_type_node,
18187 opaque_V4SI_type_node, NULL_TREE);
18188 tree opaque_ftype_opaque
18189 = build_function_type_list (integer_type_node, NULL_TREE);
18190 tree opaque_ftype_opaque_int
18191 = build_function_type_list (opaque_V4SI_type_node,
18192 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
18193 tree opaque_ftype_opaque_opaque_int
18194 = build_function_type_list (opaque_V4SI_type_node,
18195 opaque_V4SI_type_node, opaque_V4SI_type_node,
18196 integer_type_node, NULL_TREE);
18197 tree opaque_ftype_opaque_opaque_opaque
18198 = build_function_type_list (opaque_V4SI_type_node,
18199 opaque_V4SI_type_node, opaque_V4SI_type_node,
18200 opaque_V4SI_type_node, NULL_TREE);
18201 tree opaque_ftype_opaque_opaque
18202 = build_function_type_list (opaque_V4SI_type_node,
18203 opaque_V4SI_type_node, opaque_V4SI_type_node,
18204 NULL_TREE);
18205 tree int_ftype_int_opaque_opaque
18206 = build_function_type_list (integer_type_node,
18207 integer_type_node, opaque_V4SI_type_node,
18208 opaque_V4SI_type_node, NULL_TREE);
18209 tree int_ftype_int_v4si_v4si
18210 = build_function_type_list (integer_type_node,
18211 integer_type_node, V4SI_type_node,
18212 V4SI_type_node, NULL_TREE);
18213 tree int_ftype_int_v2di_v2di
18214 = build_function_type_list (integer_type_node,
18215 integer_type_node, V2DI_type_node,
18216 V2DI_type_node, NULL_TREE);
18217 tree void_ftype_v4si
18218 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
18219 tree v8hi_ftype_void
18220 = build_function_type_list (V8HI_type_node, NULL_TREE);
18221 tree void_ftype_void
18222 = build_function_type_list (void_type_node, NULL_TREE);
18223 tree void_ftype_int
18224 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
18226 tree opaque_ftype_long_pcvoid
18227 = build_function_type_list (opaque_V4SI_type_node,
18228 long_integer_type_node, pcvoid_type_node,
18229 NULL_TREE);
18230 tree v16qi_ftype_long_pcvoid
18231 = build_function_type_list (V16QI_type_node,
18232 long_integer_type_node, pcvoid_type_node,
18233 NULL_TREE);
18234 tree v8hi_ftype_long_pcvoid
18235 = build_function_type_list (V8HI_type_node,
18236 long_integer_type_node, pcvoid_type_node,
18237 NULL_TREE);
18238 tree v4si_ftype_long_pcvoid
18239 = build_function_type_list (V4SI_type_node,
18240 long_integer_type_node, pcvoid_type_node,
18241 NULL_TREE);
18242 tree v4sf_ftype_long_pcvoid
18243 = build_function_type_list (V4SF_type_node,
18244 long_integer_type_node, pcvoid_type_node,
18245 NULL_TREE);
18246 tree v2df_ftype_long_pcvoid
18247 = build_function_type_list (V2DF_type_node,
18248 long_integer_type_node, pcvoid_type_node,
18249 NULL_TREE);
18250 tree v2di_ftype_long_pcvoid
18251 = build_function_type_list (V2DI_type_node,
18252 long_integer_type_node, pcvoid_type_node,
18253 NULL_TREE);
18255 tree void_ftype_opaque_long_pvoid
18256 = build_function_type_list (void_type_node,
18257 opaque_V4SI_type_node, long_integer_type_node,
18258 pvoid_type_node, NULL_TREE);
18259 tree void_ftype_v4si_long_pvoid
18260 = build_function_type_list (void_type_node,
18261 V4SI_type_node, long_integer_type_node,
18262 pvoid_type_node, NULL_TREE);
18263 tree void_ftype_v16qi_long_pvoid
18264 = build_function_type_list (void_type_node,
18265 V16QI_type_node, long_integer_type_node,
18266 pvoid_type_node, NULL_TREE);
18268 tree void_ftype_v16qi_pvoid_long
18269 = build_function_type_list (void_type_node,
18270 V16QI_type_node, pvoid_type_node,
18271 long_integer_type_node, NULL_TREE);
18273 tree void_ftype_v8hi_long_pvoid
18274 = build_function_type_list (void_type_node,
18275 V8HI_type_node, long_integer_type_node,
18276 pvoid_type_node, NULL_TREE);
18277 tree void_ftype_v4sf_long_pvoid
18278 = build_function_type_list (void_type_node,
18279 V4SF_type_node, long_integer_type_node,
18280 pvoid_type_node, NULL_TREE);
18281 tree void_ftype_v2df_long_pvoid
18282 = build_function_type_list (void_type_node,
18283 V2DF_type_node, long_integer_type_node,
18284 pvoid_type_node, NULL_TREE);
18285 tree void_ftype_v2di_long_pvoid
18286 = build_function_type_list (void_type_node,
18287 V2DI_type_node, long_integer_type_node,
18288 pvoid_type_node, NULL_TREE);
18289 tree int_ftype_int_v8hi_v8hi
18290 = build_function_type_list (integer_type_node,
18291 integer_type_node, V8HI_type_node,
18292 V8HI_type_node, NULL_TREE);
18293 tree int_ftype_int_v16qi_v16qi
18294 = build_function_type_list (integer_type_node,
18295 integer_type_node, V16QI_type_node,
18296 V16QI_type_node, NULL_TREE);
18297 tree int_ftype_int_v4sf_v4sf
18298 = build_function_type_list (integer_type_node,
18299 integer_type_node, V4SF_type_node,
18300 V4SF_type_node, NULL_TREE);
18301 tree int_ftype_int_v2df_v2df
18302 = build_function_type_list (integer_type_node,
18303 integer_type_node, V2DF_type_node,
18304 V2DF_type_node, NULL_TREE);
18305 tree v2di_ftype_v2di
18306 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
18307 tree v4si_ftype_v4si
18308 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
18309 tree v8hi_ftype_v8hi
18310 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
18311 tree v16qi_ftype_v16qi
18312 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
18313 tree v4sf_ftype_v4sf
18314 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
18315 tree v2df_ftype_v2df
18316 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
18317 tree void_ftype_pcvoid_int_int
18318 = build_function_type_list (void_type_node,
18319 pcvoid_type_node, integer_type_node,
18320 integer_type_node, NULL_TREE);
18322 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
18323 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
18324 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
18325 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
18326 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
18327 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
18328 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
18329 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
18330 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
18331 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
18332 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
18333 ALTIVEC_BUILTIN_LVXL_V2DF);
18334 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
18335 ALTIVEC_BUILTIN_LVXL_V2DI);
18336 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
18337 ALTIVEC_BUILTIN_LVXL_V4SF);
18338 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
18339 ALTIVEC_BUILTIN_LVXL_V4SI);
18340 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
18341 ALTIVEC_BUILTIN_LVXL_V8HI);
18342 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
18343 ALTIVEC_BUILTIN_LVXL_V16QI);
18344 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
18345 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
18346 ALTIVEC_BUILTIN_LVX_V2DF);
18347 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
18348 ALTIVEC_BUILTIN_LVX_V2DI);
18349 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
18350 ALTIVEC_BUILTIN_LVX_V4SF);
18351 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
18352 ALTIVEC_BUILTIN_LVX_V4SI);
18353 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
18354 ALTIVEC_BUILTIN_LVX_V8HI);
18355 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
18356 ALTIVEC_BUILTIN_LVX_V16QI);
18357 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
18358 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
18359 ALTIVEC_BUILTIN_STVX_V2DF);
18360 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
18361 ALTIVEC_BUILTIN_STVX_V2DI);
18362 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
18363 ALTIVEC_BUILTIN_STVX_V4SF);
18364 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
18365 ALTIVEC_BUILTIN_STVX_V4SI);
18366 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
18367 ALTIVEC_BUILTIN_STVX_V8HI);
18368 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
18369 ALTIVEC_BUILTIN_STVX_V16QI);
18370 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
18371 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
18372 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
18373 ALTIVEC_BUILTIN_STVXL_V2DF);
18374 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
18375 ALTIVEC_BUILTIN_STVXL_V2DI);
18376 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
18377 ALTIVEC_BUILTIN_STVXL_V4SF);
18378 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
18379 ALTIVEC_BUILTIN_STVXL_V4SI);
18380 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
18381 ALTIVEC_BUILTIN_STVXL_V8HI);
18382 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
18383 ALTIVEC_BUILTIN_STVXL_V16QI);
18384 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
18385 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
18386 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
18387 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
18388 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
18389 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
18390 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
18391 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
18392 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
18393 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
18394 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
18395 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
18396 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
18397 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
18398 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
18399 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
18401 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
18402 VSX_BUILTIN_LXVD2X_V2DF);
18403 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
18404 VSX_BUILTIN_LXVD2X_V2DI);
18405 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
18406 VSX_BUILTIN_LXVW4X_V4SF);
18407 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
18408 VSX_BUILTIN_LXVW4X_V4SI);
18409 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
18410 VSX_BUILTIN_LXVW4X_V8HI);
18411 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
18412 VSX_BUILTIN_LXVW4X_V16QI);
18413 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
18414 VSX_BUILTIN_STXVD2X_V2DF);
18415 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
18416 VSX_BUILTIN_STXVD2X_V2DI);
18417 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
18418 VSX_BUILTIN_STXVW4X_V4SF);
18419 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
18420 VSX_BUILTIN_STXVW4X_V4SI);
18421 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
18422 VSX_BUILTIN_STXVW4X_V8HI);
18423 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
18424 VSX_BUILTIN_STXVW4X_V16QI);
18426 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
18427 VSX_BUILTIN_LD_ELEMREV_V2DF);
18428 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
18429 VSX_BUILTIN_LD_ELEMREV_V2DI);
18430 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
18431 VSX_BUILTIN_LD_ELEMREV_V4SF);
18432 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
18433 VSX_BUILTIN_LD_ELEMREV_V4SI);
18434 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
18435 VSX_BUILTIN_ST_ELEMREV_V2DF);
18436 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
18437 VSX_BUILTIN_ST_ELEMREV_V2DI);
18438 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
18439 VSX_BUILTIN_ST_ELEMREV_V4SF);
18440 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
18441 VSX_BUILTIN_ST_ELEMREV_V4SI);
18443 if (TARGET_P9_VECTOR)
18444 {
18445 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
18446 VSX_BUILTIN_LD_ELEMREV_V8HI);
18447 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
18448 VSX_BUILTIN_LD_ELEMREV_V16QI);
18449 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
18450 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
18451 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
18452 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
18453 }
18454 else
18455 {
18456 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
18457 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
18458 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
18459 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
18460 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
18461 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
18462 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
18463 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
18464 }
18466 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
18467 VSX_BUILTIN_VEC_LD);
18468 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
18469 VSX_BUILTIN_VEC_ST);
18470 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
18471 VSX_BUILTIN_VEC_XL);
18472 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
18473 VSX_BUILTIN_VEC_XST);
18475 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
18476 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
18477 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
18479 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
18480 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
18481 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
18482 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
18483 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
18484 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
18485 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
18486 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
18487 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
18488 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
18489 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
18490 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
18492 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
18493 ALTIVEC_BUILTIN_VEC_ADDE);
18494 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
18495 ALTIVEC_BUILTIN_VEC_ADDEC);
18496 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
18497 ALTIVEC_BUILTIN_VEC_CMPNE);
18498 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
18499 ALTIVEC_BUILTIN_VEC_MUL);
18501 /* Cell builtins. */
18502 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
18503 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
18504 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
18505 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
18507 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
18508 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
18509 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
18510 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
18512 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
18513 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
18514 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
18515 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
18517 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
18518 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
18519 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
18520 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
18522 if (TARGET_P9_VECTOR)
18523 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
18524 P9V_BUILTIN_STXVL);
18526 /* Add the DST variants. */
18527 d = bdesc_dst;
18528 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
18529 {
18530 HOST_WIDE_INT mask = d->mask;
18532 /* It is expected that these dst built-in functions may have
18533 d->icode equal to CODE_FOR_nothing. */
18534 if ((mask & builtin_mask) != mask)
18535 {
18536 if (TARGET_DEBUG_BUILTIN)
18537 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
18538 d->name);
18539 continue;
18540 }
18541 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
18542 }
18544 /* Initialize the predicates. */
18545 d = bdesc_altivec_preds;
18546 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
18547 {
18548 machine_mode mode1;
18549 tree type;
18550 HOST_WIDE_INT mask = d->mask;
18552 if ((mask & builtin_mask) != mask)
18553 {
18554 if (TARGET_DEBUG_BUILTIN)
18555 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
18556 d->name);
18557 continue;
18558 }
18560 if (rs6000_overloaded_builtin_p (d->code))
18561 mode1 = VOIDmode;
18562 else
18563 {
18564 /* Cannot define builtin if the instruction is disabled. */
18565 gcc_assert (d->icode != CODE_FOR_nothing);
18566 mode1 = insn_data[d->icode].operand[1].mode;
18567 }
18569 switch (mode1)
18570 {
18571 case VOIDmode:
18572 type = int_ftype_int_opaque_opaque;
18573 break;
18574 case V2DImode:
18575 type = int_ftype_int_v2di_v2di;
18576 break;
18577 case V4SImode:
18578 type = int_ftype_int_v4si_v4si;
18579 break;
18580 case V8HImode:
18581 type = int_ftype_int_v8hi_v8hi;
18582 break;
18583 case V16QImode:
18584 type = int_ftype_int_v16qi_v16qi;
18585 break;
18586 case V4SFmode:
18587 type = int_ftype_int_v4sf_v4sf;
18588 break;
18589 case V2DFmode:
18590 type = int_ftype_int_v2df_v2df;
18591 break;
18592 default:
18593 gcc_unreachable ();
18594 }
18596 def_builtin (d->name, type, d->code);
18597 }
18599 /* Initialize the abs* operators. */
18600 d = bdesc_abs;
18601 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
18602 {
18603 machine_mode mode0;
18604 tree type;
18605 HOST_WIDE_INT mask = d->mask;
18607 if ((mask & builtin_mask) != mask)
18608 {
18609 if (TARGET_DEBUG_BUILTIN)
18610 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
18611 d->name);
18612 continue;
18613 }
18615 /* Cannot define builtin if the instruction is disabled. */
18616 gcc_assert (d->icode != CODE_FOR_nothing);
18617 mode0 = insn_data[d->icode].operand[0].mode;
18619 switch (mode0)
18620 {
18621 case V2DImode:
18622 type = v2di_ftype_v2di;
18623 break;
18624 case V4SImode:
18625 type = v4si_ftype_v4si;
18626 break;
18627 case V8HImode:
18628 type = v8hi_ftype_v8hi;
18629 break;
18630 case V16QImode:
18631 type = v16qi_ftype_v16qi;
18632 break;
18633 case V4SFmode:
18634 type = v4sf_ftype_v4sf;
18635 break;
18636 case V2DFmode:
18637 type = v2df_ftype_v2df;
18638 break;
18639 default:
18640 gcc_unreachable ();
18641 }
18643 def_builtin (d->name, type, d->code);
18644 }
18646 /* Initialize target builtin that implements
18647 targetm.vectorize.builtin_mask_for_load. */
18649 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
18650 v16qi_ftype_long_pcvoid,
18651 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
18652 BUILT_IN_MD, NULL, NULL_TREE);
18653 TREE_READONLY (decl) = 1;
18654 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
18655 altivec_builtin_mask_for_load = decl;
18657 /* Access to the vec_init patterns. */
18658 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
18659 integer_type_node, integer_type_node,
18660 integer_type_node, NULL_TREE);
18661 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
18663 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
18664 short_integer_type_node,
18665 short_integer_type_node,
18666 short_integer_type_node,
18667 short_integer_type_node,
18668 short_integer_type_node,
18669 short_integer_type_node,
18670 short_integer_type_node, NULL_TREE);
18671 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
18673 ftype = build_function_type_list (V16QI_type_node, char_type_node,
18674 char_type_node, char_type_node,
18675 char_type_node, char_type_node,
18676 char_type_node, char_type_node,
18677 char_type_node, char_type_node,
18678 char_type_node, char_type_node,
18679 char_type_node, char_type_node,
18680 char_type_node, char_type_node,
18681 char_type_node, NULL_TREE);
18682 def_builtin ("__builtin_vec_init_v16qi", ftype,
18683 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
18685 ftype = build_function_type_list (V4SF_type_node, float_type_node,
18686 float_type_node, float_type_node,
18687 float_type_node, NULL_TREE);
18688 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
18690 /* VSX builtins. */
18691 ftype = build_function_type_list (V2DF_type_node, double_type_node,
18692 double_type_node, NULL_TREE);
18693 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
18695 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
18696 intDI_type_node, NULL_TREE);
18697 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
18699 /* Access to the vec_set patterns. */
18700 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
18701 intSI_type_node,
18702 integer_type_node, NULL_TREE);
18703 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
18705 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
18706 intHI_type_node,
18707 integer_type_node, NULL_TREE);
18708 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
18710 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
18711 intQI_type_node,
18712 integer_type_node, NULL_TREE);
18713 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
18715 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
18716 float_type_node,
18717 integer_type_node, NULL_TREE);
18718 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
18720 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
18721 double_type_node,
18722 integer_type_node, NULL_TREE);
18723 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
18725 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
18726 intDI_type_node,
18727 integer_type_node, NULL_TREE);
18728 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
18730 /* Access to the vec_extract patterns. */
18731 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
18732 integer_type_node, NULL_TREE);
18733 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
18735 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
18736 integer_type_node, NULL_TREE);
18737 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
18739 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
18740 integer_type_node, NULL_TREE);
18741 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
18743 ftype = build_function_type_list (float_type_node, V4SF_type_node,
18744 integer_type_node, NULL_TREE);
18745 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
18747 ftype = build_function_type_list (double_type_node, V2DF_type_node,
18748 integer_type_node, NULL_TREE);
18749 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
18751 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
18752 integer_type_node, NULL_TREE);
18753 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
18756 if (V1TI_type_node)
18757 {
18758 tree v1ti_ftype_long_pcvoid
18759 = build_function_type_list (V1TI_type_node,
18760 long_integer_type_node, pcvoid_type_node,
18761 NULL_TREE);
18762 tree void_ftype_v1ti_long_pvoid
18763 = build_function_type_list (void_type_node,
18764 V1TI_type_node, long_integer_type_node,
18765 pvoid_type_node, NULL_TREE);
18766 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
18767 VSX_BUILTIN_LXVD2X_V1TI);
18768 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
18769 VSX_BUILTIN_STXVD2X_V1TI);
18770 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
18771 NULL_TREE, NULL_TREE);
18772 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
18773 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
18774 intTI_type_node,
18775 integer_type_node, NULL_TREE);
18776 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
18777 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
18778 integer_type_node, NULL_TREE);
18779 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
18784 static void
18785 htm_init_builtins (void)
18786 {
18787 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18788 const struct builtin_description *d;
18789 size_t i;
18791 d = bdesc_htm;
18792 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
18793 {
18794 tree op[MAX_HTM_OPERANDS], type;
18795 HOST_WIDE_INT mask = d->mask;
18796 unsigned attr = rs6000_builtin_info[d->code].attr;
18797 bool void_func = (attr & RS6000_BTC_VOID);
18798 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
18799 int nopnds = 0;
18800 tree gpr_type_node;
18801 tree rettype;
18802 tree argtype;
18804 /* It is expected that these htm built-in functions may have
18805 d->icode equal to CODE_FOR_nothing. */
18807 if (TARGET_32BIT && TARGET_POWERPC64)
18808 gpr_type_node = long_long_unsigned_type_node;
18809 else
18810 gpr_type_node = long_unsigned_type_node;
18812 if (attr & RS6000_BTC_SPR)
18813 {
18814 rettype = gpr_type_node;
18815 argtype = gpr_type_node;
18816 }
18817 else if (d->code == HTM_BUILTIN_TABORTDC
18818 || d->code == HTM_BUILTIN_TABORTDCI)
18819 {
18820 rettype = unsigned_type_node;
18821 argtype = gpr_type_node;
18822 }
18823 else
18824 {
18825 rettype = unsigned_type_node;
18826 argtype = unsigned_type_node;
18827 }
18829 if ((mask & builtin_mask) != mask)
18830 {
18831 if (TARGET_DEBUG_BUILTIN)
18832 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
18833 continue;
18834 }
18836 if (d->name == 0)
18837 {
18838 if (TARGET_DEBUG_BUILTIN)
18839 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
18840 (long unsigned) i);
18841 continue;
18842 }
18844 op[nopnds++] = (void_func) ? void_type_node : rettype;
18846 if (attr_args == RS6000_BTC_UNARY)
18847 op[nopnds++] = argtype;
18848 else if (attr_args == RS6000_BTC_BINARY)
18849 {
18850 op[nopnds++] = argtype;
18851 op[nopnds++] = argtype;
18852 }
18853 else if (attr_args == RS6000_BTC_TERNARY)
18854 {
18855 op[nopnds++] = argtype;
18856 op[nopnds++] = argtype;
18857 op[nopnds++] = argtype;
18858 }
18860 switch (nopnds)
18861 {
18862 case 1:
18863 type = build_function_type_list (op[0], NULL_TREE);
18864 break;
18865 case 2:
18866 type = build_function_type_list (op[0], op[1], NULL_TREE);
18867 break;
18868 case 3:
18869 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
18870 break;
18871 case 4:
18872 type = build_function_type_list (op[0], op[1], op[2], op[3],
18873 NULL_TREE);
18874 break;
18875 default:
18876 gcc_unreachable ();
18877 }
18879 def_builtin (d->name, type, d->code);
18880 }
18881 }
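The HTM builtins registered here back GCC's transactional-memory intrinsics for Power. A hedged sketch of the usual begin/commit pattern (compiled with -mhtm); per the GCC documentation, __builtin_tbegin returns nonzero when the transaction starts successfully:

#include <htmintrin.h>

int
transactional_increment (long *counter)
{
  if (__builtin_tbegin (0))
    {
      ++*counter;           /* runs transactionally */
      __builtin_tend (0);
      return 1;
    }
  return 0;                 /* transaction failed to start or aborted */
}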
18883 /* Hash function for builtin functions with up to 3 arguments and a return
18884 type. */
18885 hashval_t
18886 builtin_hasher::hash (builtin_hash_struct *bh)
18887 {
18888 unsigned ret = 0;
18889 int i;
18891 for (i = 0; i < 4; i++)
18892 {
18893 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
18894 ret = (ret * 2) + bh->uns_p[i];
18895 }
18897 return ret;
18898 }
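The hash folds the four (mode, unsignedness) pairs into a single value using mixed radixes: base MAX_MACHINE_MODE for the mode digits and base 2 for the flags. A standalone sketch of the same scheme, with an invented mode count:

#include <stdint.h>

enum { FAKE_MAX_MODE = 130 };   /* stand-in for MAX_MACHINE_MODE */

static uint32_t
signature_hash (const uint8_t mode[4], const uint8_t uns_p[4])
{
  uint32_t h = 0;
  for (int i = 0; i < 4; i++)
    {
      h = h * FAKE_MAX_MODE + mode[i];   /* mode digit       */
      h = h * 2 + (uns_p[i] != 0);       /* signedness bit   */
    }
  return h;
}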
18900 /* Compare builtin hash entries H1 and H2 for equivalence. */
18901 bool
18902 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18903 {
18904 return ((p1->mode[0] == p2->mode[0])
18905 && (p1->mode[1] == p2->mode[1])
18906 && (p1->mode[2] == p2->mode[2])
18907 && (p1->mode[3] == p2->mode[3])
18908 && (p1->uns_p[0] == p2->uns_p[0])
18909 && (p1->uns_p[1] == p2->uns_p[1])
18910 && (p1->uns_p[2] == p2->uns_p[2])
18911 && (p1->uns_p[3] == p2->uns_p[3]));
18914 /* Map types for builtin functions with an explicit return type and up to 3
18915 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
18916 of the missing arguments. */
18917 static tree
18918 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18919 machine_mode mode_arg1, machine_mode mode_arg2,
18920 enum rs6000_builtins builtin, const char *name)
18921 {
18922 struct builtin_hash_struct h;
18923 struct builtin_hash_struct *h2;
18924 int num_args = 3;
18925 int i;
18926 tree ret_type = NULL_TREE;
18927 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18929 /* Create builtin_hash_table. */
18930 if (builtin_hash_table == NULL)
18931 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18933 h.type = NULL_TREE;
18934 h.mode[0] = mode_ret;
18935 h.mode[1] = mode_arg0;
18936 h.mode[2] = mode_arg1;
18937 h.mode[3] = mode_arg2;
18938 h.uns_p[0] = 0;
18939 h.uns_p[1] = 0;
18940 h.uns_p[2] = 0;
18941 h.uns_p[3] = 0;
18943 /* If the builtin produces unsigned results or takes unsigned arguments,
18944 and it is returned as a decl for the vectorizer (such as widening
18945 multiplies, permute), make sure the arguments and return value have the
18946 correct signedness. */
18947 switch (builtin)
18948 {
18949 /* unsigned 1 argument functions. */
18950 case CRYPTO_BUILTIN_VSBOX:
18951 case P8V_BUILTIN_VGBBD:
18952 case MISC_BUILTIN_CDTBCD:
18953 case MISC_BUILTIN_CBCDTD:
18954 h.uns_p[0] = 1;
18955 h.uns_p[1] = 1;
18956 break;
18958 /* unsigned 2 argument functions. */
18959 case ALTIVEC_BUILTIN_VMULEUB:
18960 case ALTIVEC_BUILTIN_VMULEUH:
18961 case ALTIVEC_BUILTIN_VMULOUB:
18962 case ALTIVEC_BUILTIN_VMULOUH:
18963 case CRYPTO_BUILTIN_VCIPHER:
18964 case CRYPTO_BUILTIN_VCIPHERLAST:
18965 case CRYPTO_BUILTIN_VNCIPHER:
18966 case CRYPTO_BUILTIN_VNCIPHERLAST:
18967 case CRYPTO_BUILTIN_VPMSUMB:
18968 case CRYPTO_BUILTIN_VPMSUMH:
18969 case CRYPTO_BUILTIN_VPMSUMW:
18970 case CRYPTO_BUILTIN_VPMSUMD:
18971 case CRYPTO_BUILTIN_VPMSUM:
18972 case MISC_BUILTIN_ADDG6S:
18973 case MISC_BUILTIN_DIVWEU:
18974 case MISC_BUILTIN_DIVWEUO:
18975 case MISC_BUILTIN_DIVDEU:
18976 case MISC_BUILTIN_DIVDEUO:
18977 case VSX_BUILTIN_UDIV_V2DI:
18978 h.uns_p[0] = 1;
18979 h.uns_p[1] = 1;
18980 h.uns_p[2] = 1;
18981 break;
18983 /* unsigned 3 argument functions. */
18984 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18985 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18986 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18987 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18988 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18989 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18990 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18991 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18992 case VSX_BUILTIN_VPERM_16QI_UNS:
18993 case VSX_BUILTIN_VPERM_8HI_UNS:
18994 case VSX_BUILTIN_VPERM_4SI_UNS:
18995 case VSX_BUILTIN_VPERM_2DI_UNS:
18996 case VSX_BUILTIN_XXSEL_16QI_UNS:
18997 case VSX_BUILTIN_XXSEL_8HI_UNS:
18998 case VSX_BUILTIN_XXSEL_4SI_UNS:
18999 case VSX_BUILTIN_XXSEL_2DI_UNS:
19000 case CRYPTO_BUILTIN_VPERMXOR:
19001 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
19002 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
19003 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
19004 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
19005 case CRYPTO_BUILTIN_VSHASIGMAW:
19006 case CRYPTO_BUILTIN_VSHASIGMAD:
19007 case CRYPTO_BUILTIN_VSHASIGMA:
19008 h.uns_p[0] = 1;
19009 h.uns_p[1] = 1;
19010 h.uns_p[2] = 1;
19011 h.uns_p[3] = 1;
19012 break;
19014 /* signed permute functions with unsigned char mask. */
19015 case ALTIVEC_BUILTIN_VPERM_16QI:
19016 case ALTIVEC_BUILTIN_VPERM_8HI:
19017 case ALTIVEC_BUILTIN_VPERM_4SI:
19018 case ALTIVEC_BUILTIN_VPERM_4SF:
19019 case ALTIVEC_BUILTIN_VPERM_2DI:
19020 case ALTIVEC_BUILTIN_VPERM_2DF:
19021 case VSX_BUILTIN_VPERM_16QI:
19022 case VSX_BUILTIN_VPERM_8HI:
19023 case VSX_BUILTIN_VPERM_4SI:
19024 case VSX_BUILTIN_VPERM_4SF:
19025 case VSX_BUILTIN_VPERM_2DI:
19026 case VSX_BUILTIN_VPERM_2DF:
19027 h.uns_p[3] = 1;
19028 break;
19030 /* unsigned args, signed return. */
19031 case VSX_BUILTIN_XVCVUXDSP:
19032 case VSX_BUILTIN_XVCVUXDDP_UNS:
19033 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
19034 h.uns_p[1] = 1;
19035 break;
19037 /* signed args, unsigned return. */
19038 case VSX_BUILTIN_XVCVDPUXDS_UNS:
19039 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
19040 case MISC_BUILTIN_UNPACK_TD:
19041 case MISC_BUILTIN_UNPACK_V1TI:
19042 h.uns_p[0] = 1;
19043 break;
19045 /* unsigned arguments for 128-bit pack instructions. */
19046 case MISC_BUILTIN_PACK_TD:
19047 case MISC_BUILTIN_PACK_V1TI:
19048 h.uns_p[1] = 1;
19049 h.uns_p[2] = 1;
19050 break;
19052 default:
19053 break;
19054 }
19056 /* Figure out how many args are present. */
19057 while (num_args > 0 && h.mode[num_args] == VOIDmode)
19058 num_args--;
19060 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
19061 if (!ret_type && h.uns_p[0])
19062 ret_type = builtin_mode_to_type[h.mode[0]][0];
19064 if (!ret_type)
19065 fatal_error (input_location,
19066 "internal error: builtin function %s had an unexpected "
19067 "return type %s", name, GET_MODE_NAME (h.mode[0]));
19069 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
19070 arg_type[i] = NULL_TREE;
19072 for (i = 0; i < num_args; i++)
19073 {
19074 int m = (int) h.mode[i+1];
19075 int uns_p = h.uns_p[i+1];
19077 arg_type[i] = builtin_mode_to_type[m][uns_p];
19078 if (!arg_type[i] && uns_p)
19079 arg_type[i] = builtin_mode_to_type[m][0];
19081 if (!arg_type[i])
19082 fatal_error (input_location,
19083 "internal error: builtin function %s, argument %d "
19084 "had unexpected argument type %s", name, i,
19085 GET_MODE_NAME (m));
19086 }
19088 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
19089 if (*found == NULL)
19090 {
19091 h2 = ggc_alloc<builtin_hash_struct> ();
19092 *h2 = h;
19093 *found = h2;
19095 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
19096 arg_type[2], NULL_TREE);
19097 }
19099 return (*found)->type;
19100 }
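builtin_function_type is a classic hash-consing routine: look the signature up, and only on a miss allocate a permanent copy and build the FUNCTION_TYPE once. A hedged sketch of the pattern in portable C; find_slot_insert and make_type are invented stand-ins for the GC hash-table API and build_function_type_list:

#include <stdlib.h>
#include <string.h>

struct sig { int mode[4]; int uns_p[4]; void *type; };

/* Invented stand-in for hash_table::find_slot (&h, INSERT): returns the
   address of the slot for KEY, which holds NULL if KEY is not present.  */
extern struct sig **find_slot_insert (const struct sig *key);

void *
intern_type (const struct sig *key, void *(*make_type) (const struct sig *))
{
  struct sig **slot = find_slot_insert (key);
  if (*slot == NULL)
    {
      struct sig *copy = malloc (sizeof *copy);
      if (copy == NULL)
        abort ();
      memcpy (copy, key, sizeof *copy);
      copy->type = make_type (copy);   /* built once, reused thereafter */
      *slot = copy;
    }
  return (*slot)->type;
}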
19102 static void
19103 rs6000_common_init_builtins (void)
19104 {
19105 const struct builtin_description *d;
19106 size_t i;
19108 tree opaque_ftype_opaque = NULL_TREE;
19109 tree opaque_ftype_opaque_opaque = NULL_TREE;
19110 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
19111 tree v2si_ftype = NULL_TREE;
19112 tree v2si_ftype_qi = NULL_TREE;
19113 tree v2si_ftype_v2si_qi = NULL_TREE;
19114 tree v2si_ftype_int_qi = NULL_TREE;
19115 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
19117 if (!TARGET_PAIRED_FLOAT)
19118 {
19119 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
19120 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
19121 }
19123 /* Paired and SPE builtins are only available if the compiler was built with
19124 the corresponding options, so only create those builtins when their option
19125 is enabled. Create Altivec and VSX builtins on machines with at least the
19126 general purpose extensions (970 and newer) to allow the use of the target
19127 attribute. */
19129 if (TARGET_EXTRA_BUILTINS)
19130 builtin_mask |= RS6000_BTM_COMMON;
19132 /* Add the ternary operators. */
19133 d = bdesc_3arg;
19134 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
19135 {
19136 tree type;
19137 HOST_WIDE_INT mask = d->mask;
19139 if ((mask & builtin_mask) != mask)
19140 {
19141 if (TARGET_DEBUG_BUILTIN)
19142 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
19143 continue;
19144 }
19146 if (rs6000_overloaded_builtin_p (d->code))
19147 {
19148 if (! (type = opaque_ftype_opaque_opaque_opaque))
19149 type = opaque_ftype_opaque_opaque_opaque
19150 = build_function_type_list (opaque_V4SI_type_node,
19151 opaque_V4SI_type_node,
19152 opaque_V4SI_type_node,
19153 opaque_V4SI_type_node,
19154 NULL_TREE);
19155 }
19156 else
19157 {
19158 enum insn_code icode = d->icode;
19159 if (d->name == 0)
19160 {
19161 if (TARGET_DEBUG_BUILTIN)
19162 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
19163 (long unsigned)i);
19165 continue;
19166 }
19168 if (icode == CODE_FOR_nothing)
19169 {
19170 if (TARGET_DEBUG_BUILTIN)
19171 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
19172 d->name);
19174 continue;
19175 }
19177 type = builtin_function_type (insn_data[icode].operand[0].mode,
19178 insn_data[icode].operand[1].mode,
19179 insn_data[icode].operand[2].mode,
19180 insn_data[icode].operand[3].mode,
19181 d->code, d->name);
19182 }
19184 def_builtin (d->name, type, d->code);
19185 }
19187 /* Add the binary operators. */
19188 d = bdesc_2arg;
19189 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
19190 {
19191 machine_mode mode0, mode1, mode2;
19192 tree type;
19193 HOST_WIDE_INT mask = d->mask;
19195 if ((mask & builtin_mask) != mask)
19196 {
19197 if (TARGET_DEBUG_BUILTIN)
19198 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
19199 continue;
19200 }
19202 if (rs6000_overloaded_builtin_p (d->code))
19203 {
19204 if (! (type = opaque_ftype_opaque_opaque))
19205 type = opaque_ftype_opaque_opaque
19206 = build_function_type_list (opaque_V4SI_type_node,
19207 opaque_V4SI_type_node,
19208 opaque_V4SI_type_node,
19209 NULL_TREE);
19210 }
19211 else
19212 {
19213 enum insn_code icode = d->icode;
19214 if (d->name == 0)
19215 {
19216 if (TARGET_DEBUG_BUILTIN)
19217 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
19218 (long unsigned)i);
19220 continue;
19221 }
19223 if (icode == CODE_FOR_nothing)
19224 {
19225 if (TARGET_DEBUG_BUILTIN)
19226 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
19227 d->name);
19229 continue;
19230 }
19232 mode0 = insn_data[icode].operand[0].mode;
19233 mode1 = insn_data[icode].operand[1].mode;
19234 mode2 = insn_data[icode].operand[2].mode;
19236 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
19237 {
19238 if (! (type = v2si_ftype_v2si_qi))
19239 type = v2si_ftype_v2si_qi
19240 = build_function_type_list (opaque_V2SI_type_node,
19241 opaque_V2SI_type_node,
19242 char_type_node,
19243 NULL_TREE);
19244 }
19246 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
19247 && mode2 == QImode)
19248 {
19249 if (! (type = v2si_ftype_int_qi))
19250 type = v2si_ftype_int_qi
19251 = build_function_type_list (opaque_V2SI_type_node,
19252 integer_type_node,
19253 char_type_node,
19254 NULL_TREE);
19255 }
19257 else
19258 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
19259 d->code, d->name);
19260 }
19262 def_builtin (d->name, type, d->code);
19263 }
19265 /* Add the simple unary operators. */
19266 d = bdesc_1arg;
19267 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
19268 {
19269 machine_mode mode0, mode1;
19270 tree type;
19271 HOST_WIDE_INT mask = d->mask;
19273 if ((mask & builtin_mask) != mask)
19274 {
19275 if (TARGET_DEBUG_BUILTIN)
19276 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
19277 continue;
19278 }
19280 if (rs6000_overloaded_builtin_p (d->code))
19281 {
19282 if (! (type = opaque_ftype_opaque))
19283 type = opaque_ftype_opaque
19284 = build_function_type_list (opaque_V4SI_type_node,
19285 opaque_V4SI_type_node,
19286 NULL_TREE);
19287 }
19288 else
19289 {
19290 enum insn_code icode = d->icode;
19291 if (d->name == 0)
19292 {
19293 if (TARGET_DEBUG_BUILTIN)
19294 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
19295 (long unsigned)i);
19297 continue;
19298 }
19300 if (icode == CODE_FOR_nothing)
19301 {
19302 if (TARGET_DEBUG_BUILTIN)
19303 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
19304 d->name);
19306 continue;
19307 }
19309 mode0 = insn_data[icode].operand[0].mode;
19310 mode1 = insn_data[icode].operand[1].mode;
19312 if (mode0 == V2SImode && mode1 == QImode)
19313 {
19314 if (! (type = v2si_ftype_qi))
19315 type = v2si_ftype_qi
19316 = build_function_type_list (opaque_V2SI_type_node,
19317 char_type_node,
19318 NULL_TREE);
19319 }
19321 else
19322 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
19323 d->code, d->name);
19324 }
19326 def_builtin (d->name, type, d->code);
19327 }
19329 /* Add the simple no-argument operators. */
19330 d = bdesc_0arg;
19331 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
19332 {
19333 machine_mode mode0;
19334 tree type;
19335 HOST_WIDE_INT mask = d->mask;
19337 if ((mask & builtin_mask) != mask)
19338 {
19339 if (TARGET_DEBUG_BUILTIN)
19340 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
19341 continue;
19342 }
19343 if (rs6000_overloaded_builtin_p (d->code))
19344 {
19345 if (!opaque_ftype_opaque)
19346 opaque_ftype_opaque
19347 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
19348 type = opaque_ftype_opaque;
19349 }
19350 else
19351 {
19352 enum insn_code icode = d->icode;
19353 if (d->name == 0)
19354 {
19355 if (TARGET_DEBUG_BUILTIN)
19356 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
19357 (long unsigned) i);
19358 continue;
19359 }
19360 if (icode == CODE_FOR_nothing)
19361 {
19362 if (TARGET_DEBUG_BUILTIN)
19363 fprintf (stderr,
19364 "rs6000_builtin, skip no-argument %s (no code)\n",
19365 d->name);
19366 continue;
19367 }
19368 mode0 = insn_data[icode].operand[0].mode;
19369 if (mode0 == V2SImode)
19370 {
19371 /* Code for SPE. */
19372 if (! (type = v2si_ftype))
19373 {
19374 v2si_ftype
19375 = build_function_type_list (opaque_V2SI_type_node,
19376 NULL_TREE);
19377 type = v2si_ftype;
19378 }
19379 }
19380 else
19381 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
19382 d->code, d->name);
19383 }
19384 def_builtin (d->name, type, d->code);
19385 }
19386 }
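All four loops above follow the same table-driven idiom: walk a static description table, skip entries whose required feature bits are not all enabled, and register everything else. A condensed sketch of that idiom, with invented names:

#include <stddef.h>

struct desc { const char *name; unsigned long mask; int code; };

static void
register_enabled (const struct desc *d, size_t n, unsigned long enabled,
                  void (*def) (const struct desc *))
{
  for (size_t i = 0; i < n; i++, d++)
    {
      if ((d->mask & enabled) != d->mask)
        continue;                      /* required feature missing: skip */
      def (d);                         /* e.g. wraps def_builtin */
    }
}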
19388 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
19389 static void
19390 init_float128_ibm (machine_mode mode)
19392 if (!TARGET_XL_COMPAT)
19394 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
19395 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
19396 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
19397 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
19399 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
19401 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
19402 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
19403 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
19404 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
19405 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
19406 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
19407 set_optab_libfunc (le_optab, mode, "__gcc_qle");
19409 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
19410 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
19411 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
19412 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
19413 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
19414 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
19415 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
19416 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
19419 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
19420 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
19422 else
19424 set_optab_libfunc (add_optab, mode, "_xlqadd");
19425 set_optab_libfunc (sub_optab, mode, "_xlqsub");
19426 set_optab_libfunc (smul_optab, mode, "_xlqmul");
19427 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
19430 /* Add various conversions for IFmode to use the traditional TFmode
19431 names. */
19432 if (mode == IFmode)
19434 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
19435 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
19436 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
19437 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
19438 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
19439 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
19441 if (TARGET_POWERPC64)
19443 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
19444 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
19445 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
19446 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
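/* A note on the registrations above (illustrative): set_optab_libfunc
   only renames the fallback routine.  When no insn pattern can handle
   an operation in this mode, the expander emits a call to the name
   given here -- e.g. __gcc_qadd (or _xlqadd in XL-compatible mode)
   for an addition -- instead of the generic __add<mode>3 default.  */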
19451 /* Set up IEEE 128-bit floating point routines. Use different names if the
19452 arguments can be passed in a vector register. The historical PowerPC
19453 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
19454 continue to use that if we aren't using vector registers to pass IEEE
19455 128-bit floating point. */
19457 static void
19458 init_float128_ieee (machine_mode mode)
19460 if (FLOAT128_VECTOR_P (mode))
19462 set_optab_libfunc (add_optab, mode, "__addkf3");
19463 set_optab_libfunc (sub_optab, mode, "__subkf3");
19464 set_optab_libfunc (neg_optab, mode, "__negkf2");
19465 set_optab_libfunc (smul_optab, mode, "__mulkf3");
19466 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
19467 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
19468 set_optab_libfunc (abs_optab, mode, "__abstkf2");
19470 set_optab_libfunc (eq_optab, mode, "__eqkf2");
19471 set_optab_libfunc (ne_optab, mode, "__nekf2");
19472 set_optab_libfunc (gt_optab, mode, "__gtkf2");
19473 set_optab_libfunc (ge_optab, mode, "__gekf2");
19474 set_optab_libfunc (lt_optab, mode, "__ltkf2");
19475 set_optab_libfunc (le_optab, mode, "__lekf2");
19476 set_optab_libfunc (unord_optab, mode, "__unordkf2");
19478 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
19479 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
19480 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
19481 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
19483 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
19484 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
19485 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
19487 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
19488 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
19489 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
19491 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
19492 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
19493 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
19494 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
19495 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
19496 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
19498 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
19499 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
19500 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
19501 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
19503 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
19504 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
19505 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
19506 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
19508 if (TARGET_POWERPC64)
19510 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
19511 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
19512 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
19513 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
19517 else
19519 set_optab_libfunc (add_optab, mode, "_q_add");
19520 set_optab_libfunc (sub_optab, mode, "_q_sub");
19521 set_optab_libfunc (neg_optab, mode, "_q_neg");
19522 set_optab_libfunc (smul_optab, mode, "_q_mul");
19523 set_optab_libfunc (sdiv_optab, mode, "_q_div");
19524 if (TARGET_PPC_GPOPT)
19525 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
19527 set_optab_libfunc (eq_optab, mode, "_q_feq");
19528 set_optab_libfunc (ne_optab, mode, "_q_fne");
19529 set_optab_libfunc (gt_optab, mode, "_q_fgt");
19530 set_optab_libfunc (ge_optab, mode, "_q_fge");
19531 set_optab_libfunc (lt_optab, mode, "_q_flt");
19532 set_optab_libfunc (le_optab, mode, "_q_fle");
19534 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
19535 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
19536 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
19537 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
19538 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
19539 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
19540 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
19541 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
19545 static void
19546 rs6000_init_libfuncs (void)
19548 /* __float128 support. */
19549 if (TARGET_FLOAT128_TYPE)
19551 init_float128_ibm (IFmode);
19552 init_float128_ieee (KFmode);
19555 /* AIX/Darwin/64-bit Linux quad floating point routines. */
19556 if (TARGET_LONG_DOUBLE_128)
19558 if (!TARGET_IEEEQUAD)
19559 init_float128_ibm (TFmode);
19561 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
19562 else
19563 init_float128_ieee (TFmode);
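/* Dispatch summary for the code above (sketch):
     TARGET_FLOAT128_TYPE:  IFmode gets the IBM double-double names,
                            KFmode gets the IEEE (kf) names.
     TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD:  TFmode gets IBM names.
     TARGET_LONG_DOUBLE_128 && TARGET_IEEEQUAD:   TFmode gets IEEE names.  */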
19568 /* Expand a block clear operation, and return 1 if successful. Return 0
19569 if we should let the compiler generate normal code.
19571 operands[0] is the destination
19572 operands[1] is the length
19573 operands[3] is the alignment */
19575 int
19576 expand_block_clear (rtx operands[])
19578 rtx orig_dest = operands[0];
19579 rtx bytes_rtx = operands[1];
19580 rtx align_rtx = operands[3];
19581 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
19582 HOST_WIDE_INT align;
19583 HOST_WIDE_INT bytes;
19584 int offset;
19585 int clear_bytes;
19586 int clear_step;
19588 /* If this is not a fixed size clear, just call memset */
19589 if (! constp)
19590 return 0;
19592 /* This must be a fixed size alignment */
19593 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
19594 align = INTVAL (align_rtx) * BITS_PER_UNIT;
19596 /* Anything to clear? */
19597 bytes = INTVAL (bytes_rtx);
19598 if (bytes <= 0)
19599 return 1;
19601 /* Use the builtin memset after a point, to avoid huge code bloat.
19602 When optimize_size, avoid any significant code bloat; calling
19603 memset is about 4 instructions, so allow for one instruction to
19604 load zero and three to do clearing. */
19605 if (TARGET_ALTIVEC && align >= 128)
19606 clear_step = 16;
19607 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
19608 clear_step = 8;
19609 else if (TARGET_SPE && align >= 64)
19610 clear_step = 8;
19611 else
19612 clear_step = 4;
19614 if (optimize_size && bytes > 3 * clear_step)
19615 return 0;
19616 if (! optimize_size && bytes > 8 * clear_step)
19617 return 0;
19619 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
19621 machine_mode mode = BLKmode;
19622 rtx dest;
19624 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
19626 clear_bytes = 16;
19627 mode = V4SImode;
19629 else if (bytes >= 8 && TARGET_SPE && align >= 64)
19631 clear_bytes = 8;
19632 mode = V2SImode;
19634 else if (bytes >= 8 && TARGET_POWERPC64
19635 && (align >= 64 || !STRICT_ALIGNMENT))
19637 clear_bytes = 8;
19638 mode = DImode;
19639 if (offset == 0 && align < 64)
19641 rtx addr;
19643 /* If the address form is reg+offset with offset not a
19644 multiple of four, reload into reg indirect form here
19645 rather than waiting for reload. This way we get one
19646 reload, not one per store. */
19647 addr = XEXP (orig_dest, 0);
19648 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
19649 && GET_CODE (XEXP (addr, 1)) == CONST_INT
19650 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
19652 addr = copy_addr_to_reg (addr);
19653 orig_dest = replace_equiv_address (orig_dest, addr);
19657 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
19658 { /* move 4 bytes */
19659 clear_bytes = 4;
19660 mode = SImode;
19662 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
19663 { /* move 2 bytes */
19664 clear_bytes = 2;
19665 mode = HImode;
19667 else /* move 1 byte at a time */
19669 clear_bytes = 1;
19670 mode = QImode;
19673 dest = adjust_address (orig_dest, mode, offset);
19675 emit_move_insn (dest, CONST0_RTX (mode));
19678 return 1;
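/* Illustration of the loop above: clearing a 16-byte block that is
   16-byte aligned with AltiVec enabled takes a single V4SImode store
   of zero; the same block with only 4-byte alignment on a 32-bit
   strict-alignment target without AltiVec becomes four SImode stores
   at offsets 0, 4, 8 and 12.  */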
19681 /* Emit a potentially record-form instruction, setting DST from SRC.
19682 If DOT is 0, that is all; otherwise, set CCREG to the result of the
19683 signed comparison of DST with zero. If DOT is 1, the generated RTL
19684 doesn't care about the DST result; if DOT is 2, it does. If CCREG
19685 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
19686 a separate COMPARE. */
19688 static void
19689 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
19691 if (dot == 0)
19693 emit_move_insn (dst, src);
19694 return;
19697 if (cc_reg_not_cr0_operand (ccreg, CCmode))
19699 emit_move_insn (dst, src);
19700 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
19701 return;
19704 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
19705 if (dot == 1)
19707 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
19708 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
19710 else
19712 rtx set = gen_rtx_SET (dst, src);
19713 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
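/* Illustration (sketch, with an AND as the example SRC): when CCREG
   is cr0 the PARALLEL above can become a single record-form "and."
   insn; with any other CR field it must be split into "and" plus a
   separate compare against zero.  With DOT == 1 the DST value is
   dead in the RTL -- only the CR result is used -- which gives the
   register allocator more freedom.  */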
19717 /* Figure out the correct instructions to generate to load data for
19718 block compare. MODE is used for the read from memory, and
19719 data is zero extended if REG is wider than MODE. If LE code
19720 is being generated, bswap loads are used.
19722 REG is the destination register to move the data into.
19723 MEM is the memory block being read.
19724 MODE is the mode of memory to use for the read. */
19725 static void
19726 do_load_for_compare (rtx reg, rtx mem, machine_mode mode)
19728 switch (GET_MODE (reg))
19730 case DImode:
19731 switch (mode)
19733 case QImode:
19734 emit_insn (gen_zero_extendqidi2 (reg, mem));
19735 break;
19736 case HImode:
19738 rtx src = mem;
19739 if (!BYTES_BIG_ENDIAN)
19741 src = gen_reg_rtx (HImode);
19742 emit_insn (gen_bswaphi2 (src, mem));
19744 emit_insn (gen_zero_extendhidi2 (reg, src));
19745 break;
19747 case SImode:
19749 rtx src = mem;
19750 if (!BYTES_BIG_ENDIAN)
19752 src = gen_reg_rtx (SImode);
19753 emit_insn (gen_bswapsi2 (src, mem));
19755 emit_insn (gen_zero_extendsidi2 (reg, src));
19757 break;
19758 case DImode:
19759 if (!BYTES_BIG_ENDIAN)
19760 emit_insn (gen_bswapdi2 (reg, mem));
19761 else
19762 emit_insn (gen_movdi (reg, mem));
19763 break;
19764 default:
19765 gcc_unreachable ();
19767 break;
19769 case SImode:
19770 switch (mode)
19772 case QImode:
19773 emit_insn (gen_zero_extendqisi2 (reg, mem));
19774 break;
19775 case HImode:
19777 rtx src = mem;
19778 if (!BYTES_BIG_ENDIAN)
19780 src = gen_reg_rtx (HImode);
19781 emit_insn (gen_bswaphi2 (src, mem));
19783 emit_insn (gen_zero_extendhisi2 (reg, src));
19784 break;
19786 case SImode:
19787 if (!BYTES_BIG_ENDIAN)
19788 emit_insn (gen_bswapsi2 (reg, mem));
19789 else
19790 emit_insn (gen_movsi (reg, mem));
19791 break;
19792 case DImode:
19793 /* DImode is larger than the destination reg so is not expected. */
19794 gcc_unreachable ();
19795 break;
19796 default:
19797 gcc_unreachable ();
19799 break;
19800 default:
19801 gcc_unreachable ();
19802 break;
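/* Little-endian note (sketch): the gen_bswap* paths byte-reverse each
   chunk as it is loaded (lhbrx/lwbrx/ldbrx where available), so the
   most significant end of the register always corresponds to the
   lowest memory address and a plain unsigned subtract orders the two
   blocks the same way memcmp would.  */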
19806 /* Select the mode to be used for reading the next chunk of bytes
19807 in the compare.
19809 OFFSET is the current read offset from the beginning of the block.
19810 BYTES is the number of bytes remaining to be read.
19811 ALIGN is the minimum alignment of the memory blocks being compared in bytes.
19812 WORD_MODE_OK indicates using WORD_MODE is allowed, else SImode is
19813 the largest allowable mode. */
19814 static machine_mode
19815 select_block_compare_mode (unsigned HOST_WIDE_INT offset,
19816 unsigned HOST_WIDE_INT bytes,
19817 unsigned HOST_WIDE_INT align, bool word_mode_ok)
19819 /* First see if we can do a whole load unit
19820 as that will be more efficient than a larger load + shift. */
19822 /* If big, use biggest chunk.
19823 If exactly chunk size, use that size.
19824 If remainder can be done in one piece with shifting, do that.
19825 Do largest chunk possible without violating alignment rules. */
19827 /* The most we can read without potential page crossing. */
19828 unsigned HOST_WIDE_INT maxread = ROUND_UP (bytes, align);
19830 if (word_mode_ok && bytes >= UNITS_PER_WORD)
19831 return word_mode;
19832 else if (bytes == GET_MODE_SIZE (SImode))
19833 return SImode;
19834 else if (bytes == GET_MODE_SIZE (HImode))
19835 return HImode;
19836 else if (bytes == GET_MODE_SIZE (QImode))
19837 return QImode;
19838 else if (bytes < GET_MODE_SIZE (SImode)
19839 && offset >= GET_MODE_SIZE (SImode) - bytes)
19840 /* This matches the case where we have SImode and 3 bytes
19841 and offset >= 1 and permits us to move back one and overlap
19842 with the previous read, thus avoiding having to shift
19843 unwanted bytes off of the input. */
19844 return SImode;
19845 else if (word_mode_ok && bytes < UNITS_PER_WORD
19846 && offset >= UNITS_PER_WORD - bytes)
19847 /* Similarly, if we can use DImode it will get matched here and
19848 can do an overlapping read that ends at the end of the block. */
19849 return word_mode;
19850 else if (word_mode_ok && maxread >= UNITS_PER_WORD)
19851 /* It is safe to do all remaining in one load of largest size,
19852 possibly with a shift to get rid of unwanted bytes. */
19853 return word_mode;
19854 else if (maxread >= GET_MODE_SIZE (SImode))
19855 /* It is safe to do all remaining in one SImode load,
19856 possibly with a shift to get rid of unwanted bytes. */
19857 return SImode;
19858 else if (bytes > GET_MODE_SIZE (SImode))
19859 return SImode;
19860 else if (bytes > GET_MODE_SIZE (HImode))
19861 return HImode;
19863 /* Final fallback is to do one byte at a time. */
19864 return QImode;
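/* Worked example: with bytes == 3 and offset == 5, none of the
   exact-size cases match, but bytes < 4 and offset >= 4 - 3, so we
   return SImode: the 4-byte load starts one byte early and overlaps
   a byte that the previous chunk already compared.  */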
19867 /* Compute the alignment of pointer+OFFSET where the original alignment
19868 of pointer was BASE_ALIGN. */
19869 static unsigned HOST_WIDE_INT
19870 compute_current_alignment (unsigned HOST_WIDE_INT base_align,
19871 unsigned HOST_WIDE_INT offset)
19873 if (offset == 0)
19874 return base_align;
19875 return min (base_align, offset & -offset);
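/* offset & -offset isolates the lowest set bit of OFFSET, i.e. the
   largest power of two that divides it.  E.g. with base_align == 8
   and offset == 12 this returns min (8, 4) == 4: the chunk at offset
   12 may only assume 4-byte alignment.  */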
19878 /* Expand a block compare operation, and return true if successful.
19879 Return false if we should let the compiler generate normal code,
19880 probably a memcmp call.
19882 OPERANDS[0] is the target (result).
19883 OPERANDS[1] is the first source.
19884 OPERANDS[2] is the second source.
19885 OPERANDS[3] is the length.
19886 OPERANDS[4] is the alignment. */
19887 bool
19888 expand_block_compare (rtx operands[])
19890 rtx target = operands[0];
19891 rtx orig_src1 = operands[1];
19892 rtx orig_src2 = operands[2];
19893 rtx bytes_rtx = operands[3];
19894 rtx align_rtx = operands[4];
19895 HOST_WIDE_INT cmp_bytes = 0;
19896 rtx src1 = orig_src1;
19897 rtx src2 = orig_src2;
19899 /* This case is complicated to handle because the subtract
19900 with carry instructions do not generate the 64-bit
19901 carry and so we must emit code to calculate it ourselves.
19902 We choose not to implement this yet. */
19903 if (TARGET_32BIT && TARGET_POWERPC64)
19904 return false;
19906 /* If this is not a fixed size compare, just call memcmp. */
19907 if (!CONST_INT_P (bytes_rtx))
19908 return false;
19910 /* This must be a fixed size alignment. */
19911 if (!CONST_INT_P (align_rtx))
19912 return false;
19914 unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
19916 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned accesses. */
19917 if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
19918 || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
19919 return false;
19921 gcc_assert (GET_MODE (target) == SImode);
19923 /* Anything to move? */
19924 unsigned HOST_WIDE_INT bytes = UINTVAL (bytes_rtx);
19925 if (bytes == 0)
19926 return true;
19928 /* The code generated for p7 and older is not faster than glibc
19929 memcmp if alignment is small and length is not short, so bail
19930 out to avoid those conditions. */
19931 if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
19932 && ((base_align == 1 && bytes > 16)
19933 || (base_align == 2 && bytes > 32)))
19934 return false;
19936 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
19937 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
19938 /* P7/P8 code uses cond for subfc., but P9 uses
19939 it for cmpld, which needs CCUNSmode. */
19940 rtx cond;
19941 if (TARGET_P9_MISC)
19942 cond = gen_reg_rtx (CCUNSmode);
19943 else
19944 cond = gen_reg_rtx (CCmode);
19946 /* If we have an LE target without ldbrx and word_mode is DImode,
19947 then we must avoid using word_mode. */
19948 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
19949 && word_mode == DImode);
19951 /* Strategy phase. How many ops will this take and should we expand it? */
19953 unsigned HOST_WIDE_INT offset = 0;
19954 machine_mode load_mode =
19955 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
19956 unsigned int load_mode_size = GET_MODE_SIZE (load_mode);
19958 /* We don't want to generate too much code. */
19959 unsigned HOST_WIDE_INT max_bytes =
19960 load_mode_size * (unsigned HOST_WIDE_INT) rs6000_block_compare_inline_limit;
19961 if (!IN_RANGE (bytes, 1, max_bytes))
19962 return false;
19964 bool generate_6432_conversion = false;
19965 rtx convert_label = NULL;
19966 rtx final_label = NULL;
19968 /* Example of generated code for 18 bytes with 1-byte alignment.
19969 Compiled with -fno-reorder-blocks for clarity.
19970 ldbrx 10,31,8
19971 ldbrx 9,7,8
19972 subfc. 9,9,10
19973 bne 0,.L6487
19974 addi 9,12,8
19975 addi 5,11,8
19976 ldbrx 10,0,9
19977 ldbrx 9,0,5
19978 subfc. 9,9,10
19979 bne 0,.L6487
19980 addi 9,12,16
19981 lhbrx 10,0,9
19982 addi 9,11,16
19983 lhbrx 9,0,9
19984 subf 9,9,10
19985 b .L6488
19986 .p2align 4,,15
19987 .L6487: #convert_label
19988 popcntd 9,9
19989 subfe 10,10,10
19990 or 9,9,10
19991 .L6488: #final_label
19992 extsw 10,9
19994 We start off with DImode for two blocks that jump to the DI->SI conversion
19995 if the difference is found there, then a final block of HImode that skips
19996 the DI->SI conversion. */
19998 while (bytes > 0)
20000 unsigned int align = compute_current_alignment (base_align, offset);
20001 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20002 load_mode = select_block_compare_mode (offset, bytes, align,
20003 word_mode_ok);
20004 else
20005 load_mode = select_block_compare_mode (0, bytes, align, word_mode_ok);
20006 load_mode_size = GET_MODE_SIZE (load_mode);
20007 if (bytes >= load_mode_size)
20008 cmp_bytes = load_mode_size;
20009 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20011 /* Move this load back so it doesn't go past the end.
20012 P8/P9 can do this efficiently. */
20013 unsigned int extra_bytes = load_mode_size - bytes;
20014 cmp_bytes = bytes;
20015 if (extra_bytes < offset)
20017 offset -= extra_bytes;
20018 cmp_bytes = load_mode_size;
20019 bytes = cmp_bytes;
20022 else
20023 /* P7 and earlier can't do the overlapping load trick fast,
20024 so this forces a non-overlapping load and a shift to get
20025 rid of the extra bytes. */
20026 cmp_bytes = bytes;
20028 src1 = adjust_address (orig_src1, load_mode, offset);
20029 src2 = adjust_address (orig_src2, load_mode, offset);
20031 if (!REG_P (XEXP (src1, 0)))
20033 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20034 src1 = replace_equiv_address (src1, src1_reg);
20036 set_mem_size (src1, cmp_bytes);
20038 if (!REG_P (XEXP (src2, 0)))
20040 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20041 src2 = replace_equiv_address (src2, src2_reg);
20043 set_mem_size (src2, cmp_bytes);
20045 do_load_for_compare (tmp_reg_src1, src1, load_mode);
20046 do_load_for_compare (tmp_reg_src2, src2, load_mode);
20048 if (cmp_bytes < load_mode_size)
20050 /* Shift unneeded bytes off. */
20051 rtx sh = GEN_INT (BITS_PER_UNIT * (load_mode_size - cmp_bytes));
20052 if (word_mode == DImode)
20054 emit_insn (gen_lshrdi3 (tmp_reg_src1, tmp_reg_src1, sh));
20055 emit_insn (gen_lshrdi3 (tmp_reg_src2, tmp_reg_src2, sh));
20057 else
20059 emit_insn (gen_lshrsi3 (tmp_reg_src1, tmp_reg_src1, sh));
20060 emit_insn (gen_lshrsi3 (tmp_reg_src2, tmp_reg_src2, sh));
20064 int remain = bytes - cmp_bytes;
20065 if (GET_MODE_SIZE (GET_MODE (target)) > GET_MODE_SIZE (load_mode))
20067 /* Target is larger than load size so we don't need to
20068 reduce result size. */
20070 /* We previously did a block that needed 64->32 conversion but
20071 the current block does not, so a label is needed to jump
20072 to the end. */
20073 if (generate_6432_conversion && !final_label)
20074 final_label = gen_label_rtx ();
20076 if (remain > 0)
20078 /* This is not the last block, branch to the end if the result
20079 of this subtract is not zero. */
20080 if (!final_label)
20081 final_label = gen_label_rtx ();
20082 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20083 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
20084 rtx cr = gen_reg_rtx (CCmode);
20085 rs6000_emit_dot_insn (tmp_reg_src2, tmp, 2, cr);
20086 emit_insn (gen_movsi (target,
20087 gen_lowpart (SImode, tmp_reg_src2)));
20088 rtx ne_rtx = gen_rtx_NE (VOIDmode, cr, const0_rtx);
20089 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
20090 fin_ref, pc_rtx);
20091 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20092 JUMP_LABEL (j) = final_label;
20093 LABEL_NUSES (final_label) += 1;
20095 else
20097 if (word_mode == DImode)
20099 emit_insn (gen_subdi3 (tmp_reg_src2, tmp_reg_src1,
20100 tmp_reg_src2));
20101 emit_insn (gen_movsi (target,
20102 gen_lowpart (SImode, tmp_reg_src2)));
20104 else
20105 emit_insn (gen_subsi3 (target, tmp_reg_src1, tmp_reg_src2));
20107 if (final_label)
20109 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20110 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20111 JUMP_LABEL (j) = final_label;
20112 LABEL_NUSES (final_label) += 1;
20113 emit_barrier ();
20117 else
20119 /* Do we need a 64->32 conversion block? We need the 64->32
20120 conversion even if target size == load_mode size because
20121 the subtract generates one extra bit. */
20122 generate_6432_conversion = true;
20124 if (remain > 0)
20126 if (!convert_label)
20127 convert_label = gen_label_rtx ();
20129 /* Compare to zero and branch to convert_label if not zero. */
20130 rtx cvt_ref = gen_rtx_LABEL_REF (VOIDmode, convert_label);
20131 if (TARGET_P9_MISC)
20133 /* Generate a compare, and convert with a setb later. */
20134 rtx cmp = gen_rtx_COMPARE (CCUNSmode, tmp_reg_src1,
20135 tmp_reg_src2);
20136 emit_insn (gen_rtx_SET (cond, cmp));
20138 else
20139 /* Generate a subfc. and use the longer
20140 sequence for conversion. */
20141 if (TARGET_64BIT)
20142 emit_insn (gen_subfdi3_carry_dot2 (tmp_reg_src2, tmp_reg_src2,
20143 tmp_reg_src1, cond));
20144 else
20145 emit_insn (gen_subfsi3_carry_dot2 (tmp_reg_src2, tmp_reg_src2,
20146 tmp_reg_src1, cond));
20147 rtx ne_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20148 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
20149 cvt_ref, pc_rtx);
20150 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20151 JUMP_LABEL (j) = convert_label;
20152 LABEL_NUSES (convert_label) += 1;
20154 else
20156 /* Just do the subtract/compare. Since this is the last block
20157 the convert code will be generated immediately following. */
20158 if (TARGET_P9_MISC)
20160 rtx cmp = gen_rtx_COMPARE (CCUNSmode, tmp_reg_src1,
20161 tmp_reg_src2);
20162 emit_insn (gen_rtx_SET (cond, cmp));
20164 else
20165 if (TARGET_64BIT)
20166 emit_insn (gen_subfdi3_carry (tmp_reg_src2, tmp_reg_src2,
20167 tmp_reg_src1));
20168 else
20169 emit_insn (gen_subfsi3_carry (tmp_reg_src2, tmp_reg_src2,
20170 tmp_reg_src1));
20174 offset += cmp_bytes;
20175 bytes -= cmp_bytes;
20178 if (generate_6432_conversion)
20180 if (convert_label)
20181 emit_label (convert_label);
20183 /* We need to produce DI result from sub, then convert to target SI
20184 while maintaining <0 / ==0 / >0 properties. This sequence works:
20185 subfc L,A,B
20186 subfe H,H,H
20187 popcntd L,L
20188 rldimi L,H,6,0
20190 This is an alternate one Segher cooked up if somebody
20191 wants to expand this for something that doesn't have popcntd:
20192 subfc L,a,b
20193 subfe H,x,x
20194 addic t,L,-1
20195 subfe v,t,L
20196 or z,v,H
20198 And finally, p9 can just do this:
20199 cmpld A,B
20200 setb r */
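/* Why the first sequence works (sketch): subfc L,A,B computes
   L = B - A and sets CA iff B >= A unsigned; subfe H,H,H then yields
   H = CA - 1, i.e. H == -1 exactly when B < A.  popcntd maps a
   nonzero L to a small positive value (a zero L stays zero), and
   OR-ing H into that forces the result negative exactly when B < A,
   giving the required <0 / ==0 / >0 result.  */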
20202 if (TARGET_P9_MISC)
20204 emit_insn (gen_setb_unsigned (target, cond));
20206 else
20208 if (TARGET_64BIT)
20210 rtx tmp_reg_ca = gen_reg_rtx (DImode);
20211 emit_insn (gen_subfdi3_carry_in_xx (tmp_reg_ca));
20212 emit_insn (gen_popcntddi2 (tmp_reg_src2, tmp_reg_src2));
20213 emit_insn (gen_iordi3 (tmp_reg_src2, tmp_reg_src2, tmp_reg_ca));
20214 emit_insn (gen_movsi (target, gen_lowpart (SImode, tmp_reg_src2)));
20216 else
20218 rtx tmp_reg_ca = gen_reg_rtx (SImode);
20219 emit_insn (gen_subfsi3_carry_in_xx (tmp_reg_ca));
20220 emit_insn (gen_popcntdsi2 (tmp_reg_src2, tmp_reg_src2));
20221 emit_insn (gen_iorsi3 (target, tmp_reg_src2, tmp_reg_ca));
20226 if (final_label)
20227 emit_label (final_label);
20229 gcc_assert (bytes == 0);
20230 return true;
20233 /* Generate alignment check and branch code to set up for
20234 strncmp when we don't have DI alignment.
20235 STRNCMP_LABEL is the label to branch if there is a page crossing.
20236 SRC is the string pointer to be examined.
20237 BYTES is the max number of bytes to compare. */
20238 static void
20239 expand_strncmp_align_check (rtx strncmp_label, rtx src, HOST_WIDE_INT bytes)
20241 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, strncmp_label);
20242 rtx src_check = copy_addr_to_reg (XEXP (src, 0));
20243 if (GET_MODE (src_check) == SImode)
20244 emit_insn (gen_andsi3 (src_check, src_check, GEN_INT (0xfff)));
20245 else
20246 emit_insn (gen_anddi3 (src_check, src_check, GEN_INT (0xfff)));
20247 rtx cond = gen_reg_rtx (CCmode);
20248 emit_move_insn (cond, gen_rtx_COMPARE (CCmode, src_check,
20249 GEN_INT (4096 - bytes)));
20251 rtx cmp_rtx = gen_rtx_LT (VOIDmode, cond, const0_rtx);
20253 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
20254 pc_rtx, lab_ref);
20255 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20256 JUMP_LABEL (j) = strncmp_label;
20257 LABEL_NUSES (strncmp_label) += 1;
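/* Illustration: for BYTES == 16 this masks the low 12 bits of the
   address and branches to STRNCMP_LABEL whenever (addr & 0xfff) is
   not below 4096 - 16, i.e. whenever reading 16 bytes from the
   address could cross a 4K page boundary.  */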
20260 /* Expand a string compare operation with length, and return
20261 true if successful. Return false if we should let the
20262 compiler generate normal code, probably a strncmp call.
20264 OPERANDS[0] is the target (result).
20265 OPERANDS[1] is the first source.
20266 OPERANDS[2] is the second source.
20267 If NO_LENGTH is zero, then:
20268 OPERANDS[3] is the length.
20269 OPERANDS[4] is the alignment in bytes.
20270 If NO_LENGTH is nonzero, then:
20271 OPERANDS[3] is the alignment in bytes. */
20272 bool
20273 expand_strn_compare (rtx operands[], int no_length)
20275 rtx target = operands[0];
20276 rtx orig_src1 = operands[1];
20277 rtx orig_src2 = operands[2];
20278 rtx bytes_rtx, align_rtx;
20279 if (no_length)
20281 bytes_rtx = NULL;
20282 align_rtx = operands[3];
20284 else
20286 bytes_rtx = operands[3];
20287 align_rtx = operands[4];
20289 unsigned HOST_WIDE_INT cmp_bytes = 0;
20290 rtx src1 = orig_src1;
20291 rtx src2 = orig_src2;
20293 /* If we have a length, it must be constant. This simplifies things
20294 a bit as we don't have to generate code to check if we've exceeded
20295 the length. Later this could be expanded to handle a variable length. */
20296 if (!no_length && !CONST_INT_P (bytes_rtx))
20297 return false;
20299 /* This must be a fixed size alignment. */
20300 if (!CONST_INT_P (align_rtx))
20301 return false;
20303 unsigned int base_align = UINTVAL (align_rtx);
20304 int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
20305 int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
20307 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned accesses. */
20308 if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
20309 || SLOW_UNALIGNED_ACCESS (word_mode, align2))
20310 return false;
20312 gcc_assert (GET_MODE (target) == SImode);
20314 /* If we have an LE target without ldbrx and word_mode is DImode,
20315 then we must avoid using word_mode. */
20316 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
20317 && word_mode == DImode);
20319 unsigned int word_mode_size = GET_MODE_SIZE (word_mode);
20321 unsigned HOST_WIDE_INT offset = 0;
20322 unsigned HOST_WIDE_INT bytes; /* N from the strncmp args if available. */
20323 unsigned HOST_WIDE_INT compare_length; /* How much to compare inline. */
20324 if (no_length)
20325 /* Use this as a stand-in to determine the mode to use. */
20326 bytes = rs6000_string_compare_inline_limit * word_mode_size;
20327 else
20328 bytes = UINTVAL (bytes_rtx);
20330 machine_mode load_mode =
20331 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
20332 unsigned int load_mode_size = GET_MODE_SIZE (load_mode);
20333 compare_length = rs6000_string_compare_inline_limit * load_mode_size;
20335 /* If we have equality at the end of the last compare and we have not
20336 found the end of the string, we need to call strcmp/strncmp to
20337 compare the remainder. */
20338 bool equality_compare_rest = false;
20340 if (no_length)
20342 bytes = compare_length;
20343 equality_compare_rest = true;
20345 else
20347 if (bytes <= compare_length)
20348 compare_length = bytes;
20349 else
20350 equality_compare_rest = true;
20353 rtx result_reg = gen_reg_rtx (word_mode);
20354 rtx final_move_label = gen_label_rtx ();
20355 rtx final_label = gen_label_rtx ();
20356 rtx begin_compare_label = NULL;
20358 if (base_align < 8)
20360 /* Generate code that checks distance to 4k boundary for this case. */
20361 begin_compare_label = gen_label_rtx ();
20362 rtx strncmp_label = gen_label_rtx ();
20363 rtx jmp;
20365 /* Strncmp for power8 in glibc does this:
20366 rldicl r8,r3,0,52
20367 cmpldi cr7,r8,4096-16
20368 bgt cr7,L(pagecross) */
20370 /* Make sure that the length we use for the alignment test and
20371 the subsequent code generation are in agreement so we do not
20372 go past the length we tested for a 4k boundary crossing. */
20373 unsigned HOST_WIDE_INT align_test = compare_length;
20374 if (align_test < 8)
20376 align_test = HOST_WIDE_INT_1U << ceil_log2 (align_test);
20377 base_align = align_test;
20379 else
20381 align_test = ROUND_UP (align_test, 8);
20382 base_align = 8;
20385 if (align1 < 8)
20386 expand_strncmp_align_check (strncmp_label, src1, align_test);
20387 if (align2 < 8)
20388 expand_strncmp_align_check (strncmp_label, src2, align_test);
20390 /* Now generate the following sequence:
20391 - branch to begin_compare
20392 - strncmp_label
20393 - call to strncmp
20394 - branch to final_label
20395 - begin_compare_label */
20397 rtx cmp_ref = gen_rtx_LABEL_REF (VOIDmode, begin_compare_label);
20398 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, cmp_ref));
20399 JUMP_LABEL (jmp) = begin_compare_label;
20400 LABEL_NUSES (begin_compare_label) += 1;
20401 emit_barrier ();
20403 emit_label (strncmp_label);
20405 if (!REG_P (XEXP (src1, 0)))
20407 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20408 src1 = replace_equiv_address (src1, src1_reg);
20411 if (!REG_P (XEXP (src2, 0)))
20413 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20414 src2 = replace_equiv_address (src2, src2_reg);
20417 if (no_length)
20419 tree fun = builtin_decl_explicit (BUILT_IN_STRCMP);
20420 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20421 target, LCT_NORMAL, GET_MODE (target), 2,
20422 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20423 force_reg (Pmode, XEXP (src2, 0)), Pmode);
20425 else
20427 /* -m32 -mpowerpc64 results in word_mode being DImode even
20428 though otherwise it is 32-bit. The length arg to strncmp
20429 is a size_t which will be the same size as pointers. */
20430 rtx len_rtx;
20431 if (TARGET_64BIT)
20432 len_rtx = gen_reg_rtx (DImode);
20433 else
20434 len_rtx = gen_reg_rtx (SImode);
20436 emit_move_insn (len_rtx, bytes_rtx);
20438 tree fun = builtin_decl_explicit (BUILT_IN_STRNCMP);
20439 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20440 target, LCT_NORMAL, GET_MODE (target), 3,
20441 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20442 force_reg (Pmode, XEXP (src2, 0)), Pmode,
20443 len_rtx, GET_MODE (len_rtx));
20446 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20447 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20448 JUMP_LABEL (jmp) = final_label;
20449 LABEL_NUSES (final_label) += 1;
20450 emit_barrier ();
20451 emit_label (begin_compare_label);
20454 rtx cleanup_label = NULL;
20455 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
20456 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
20458 /* Generate sequence of ld/ldbrx, cmpb to compare out
20459 to the length specified. */
20460 unsigned HOST_WIDE_INT bytes_to_compare = compare_length;
20461 while (bytes_to_compare > 0)
20463 /* Compare sequence:
20464 check each 8B with: ld/ld cmpd bne
20465 If equal, use rldicr/cmpb to check for zero byte.
20466 cleanup code at end:
20467 cmpb get byte that differs
20468 cmpb look for zero byte
20469 orc combine
20470 cntlzd get bit of first zero/diff byte
20471 subfic convert for rldcl use
20472 rldcl rldcl extract diff/zero byte
20473 subf subtract for final result
20475 The last compare can branch around the cleanup code if the
20476 result is zero because the strings are exactly equal. */
20477 unsigned int align = compute_current_alignment (base_align, offset);
20478 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20479 load_mode = select_block_compare_mode (offset, bytes_to_compare, align,
20480 word_mode_ok);
20481 else
20482 load_mode = select_block_compare_mode (0, bytes_to_compare, align,
20483 word_mode_ok);
20484 load_mode_size = GET_MODE_SIZE (load_mode);
20485 if (bytes_to_compare >= load_mode_size)
20486 cmp_bytes = load_mode_size;
20487 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
20489 /* Move this load back so it doesn't go past the end.
20490 P8/P9 can do this efficiently. */
20491 unsigned int extra_bytes = load_mode_size - bytes_to_compare;
20492 cmp_bytes = bytes_to_compare;
20493 if (extra_bytes < offset)
20495 offset -= extra_bytes;
20496 cmp_bytes = load_mode_size;
20497 bytes_to_compare = cmp_bytes;
20500 else
20501 /* P7 and earlier can't do the overlapping load trick fast,
20502 so this forces a non-overlapping load and a shift to get
20503 rid of the extra bytes. */
20504 cmp_bytes = bytes_to_compare;
20506 src1 = adjust_address (orig_src1, load_mode, offset);
20507 src2 = adjust_address (orig_src2, load_mode, offset);
20509 if (!REG_P (XEXP (src1, 0)))
20511 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20512 src1 = replace_equiv_address (src1, src1_reg);
20514 set_mem_size (src1, cmp_bytes);
20516 if (!REG_P (XEXP (src2, 0)))
20518 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20519 src2 = replace_equiv_address (src2, src2_reg);
20521 set_mem_size (src2, cmp_bytes);
20523 do_load_for_compare (tmp_reg_src1, src1, load_mode);
20524 do_load_for_compare (tmp_reg_src2, src2, load_mode);
20526 /* We must always left-align the data we read, and
20527 clear any bytes to the right that are beyond the string.
20528 Otherwise the cmpb sequence won't produce the correct
20529 results. The beginning of the compare will be done
20530 with word_mode, so it will not need any extra shifts or
20531 clearing on the right. */
20533 if (load_mode_size < word_mode_size)
20535 /* Rotate left first. */
20536 rtx sh = GEN_INT (BITS_PER_UNIT * (word_mode_size - load_mode_size));
20537 if (word_mode == DImode)
20539 emit_insn (gen_rotldi3 (tmp_reg_src1, tmp_reg_src1, sh));
20540 emit_insn (gen_rotldi3 (tmp_reg_src2, tmp_reg_src2, sh));
20542 else
20544 emit_insn (gen_rotlsi3 (tmp_reg_src1, tmp_reg_src1, sh));
20545 emit_insn (gen_rotlsi3 (tmp_reg_src2, tmp_reg_src2, sh));
20549 if (cmp_bytes < word_mode_size)
20551 /* Now clear right. This plus the rotate can be
20552 turned into a rldicr instruction. */
20553 HOST_WIDE_INT mb = BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20554 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20555 if (word_mode == DImode)
20557 emit_insn (gen_anddi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
20558 emit_insn (gen_anddi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
20560 else
20562 emit_insn (gen_andsi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
20563 emit_insn (gen_andsi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
20567 /* Cases to handle. A and B are chunks of the two strings.
20568 1: Not end of comparison:
20569 A != B: branch to cleanup code to compute result.
20570 A == B: check for 0 byte, next block if not found.
20571 2: End of the inline comparison:
20572 A != B: branch to cleanup code to compute result.
20573 A == B: check for 0 byte, call strcmp/strncmp
20574 3: Compared the requested N bytes:
20575 A == B: branch to result 0.
20576 A != B: cleanup code to compute result. */
20578 unsigned HOST_WIDE_INT remain = bytes_to_compare - cmp_bytes;
20580 rtx dst_label;
20581 if (remain > 0 || equality_compare_rest)
20583 /* Branch to cleanup code, otherwise fall through to do
20584 more compares. */
20585 if (!cleanup_label)
20586 cleanup_label = gen_label_rtx ();
20587 dst_label = cleanup_label;
20589 else
20590 /* Branch to end and produce result of 0. */
20591 dst_label = final_move_label;
20593 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, dst_label);
20594 rtx cond = gen_reg_rtx (CCmode);
20596 /* Always produce the 0 result; it is needed if
20597 cmpb finds a 0 byte in this chunk. */
20598 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
20599 rs6000_emit_dot_insn (result_reg, tmp, 1, cond);
20601 rtx cmp_rtx;
20602 if (remain == 0 && !equality_compare_rest)
20603 cmp_rtx = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
20604 else
20605 cmp_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20607 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
20608 lab_ref, pc_rtx);
20609 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20610 JUMP_LABEL (j) = dst_label;
20611 LABEL_NUSES (dst_label) += 1;
20613 if (remain > 0 || equality_compare_rest)
20615 /* Generate a cmpb to test for a 0 byte and branch
20616 to final result if found. */
20617 rtx cmpb_zero = gen_reg_rtx (word_mode);
20618 rtx lab_ref_fin = gen_rtx_LABEL_REF (VOIDmode, final_move_label);
20619 rtx condz = gen_reg_rtx (CCmode);
20620 rtx zero_reg = gen_reg_rtx (word_mode);
20621 if (word_mode == SImode)
20623 emit_insn (gen_movsi (zero_reg, GEN_INT (0)));
20624 emit_insn (gen_cmpbsi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20625 if (cmp_bytes < word_mode_size)
20627 /* Don't want to look at zero bytes past end. */
20628 HOST_WIDE_INT mb =
20629 BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20630 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20631 emit_insn (gen_andsi3_mask (cmpb_zero, cmpb_zero, mask));
20634 else
20636 emit_insn (gen_movdi (zero_reg, GEN_INT (0)));
20637 emit_insn (gen_cmpbdi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20638 if (cmp_bytes < word_mode_size)
20640 /* Don't want to look at zero bytes past end. */
20641 HOST_WIDE_INT mb =
20642 BITS_PER_UNIT * (word_mode_size - cmp_bytes);
20643 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
20644 emit_insn (gen_anddi3_mask (cmpb_zero, cmpb_zero, mask));
20648 emit_move_insn (condz, gen_rtx_COMPARE (CCmode, cmpb_zero, zero_reg));
20649 rtx cmpnz_rtx = gen_rtx_NE (VOIDmode, condz, const0_rtx);
20650 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmpnz_rtx,
20651 lab_ref_fin, pc_rtx);
20652 rtx j2 = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
20653 JUMP_LABEL (j2) = final_move_label;
20654 LABEL_NUSES (final_move_label) += 1;
20658 offset += cmp_bytes;
20659 bytes_to_compare -= cmp_bytes;
20662 if (equality_compare_rest)
20664 /* Update pointers past what has been compared already. */
20665 src1 = adjust_address (orig_src1, load_mode, offset);
20666 src2 = adjust_address (orig_src2, load_mode, offset);
20668 if (!REG_P (XEXP (src1, 0)))
20670 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
20671 src1 = replace_equiv_address (src1, src1_reg);
20673 set_mem_size (src1, cmp_bytes);
20675 if (!REG_P (XEXP (src2, 0)))
20677 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
20678 src2 = replace_equiv_address (src2, src2_reg);
20680 set_mem_size (src2, cmp_bytes);
20682 /* Construct call to strcmp/strncmp to compare the rest of the string. */
20683 if (no_length)
20685 tree fun = builtin_decl_explicit (BUILT_IN_STRCMP);
20686 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20687 target, LCT_NORMAL, GET_MODE (target), 2,
20688 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20689 force_reg (Pmode, XEXP (src2, 0)), Pmode);
20691 else
20693 rtx len_rtx;
20694 if (TARGET_64BIT)
20695 len_rtx = gen_reg_rtx (DImode);
20696 else
20697 len_rtx = gen_reg_rtx (SImode);
20699 emit_move_insn (len_rtx, GEN_INT (bytes - compare_length));
20700 tree fun = builtin_decl_explicit (BUILT_IN_STRNCMP);
20701 emit_library_call_value (XEXP (DECL_RTL (fun), 0),
20702 target, LCT_NORMAL, GET_MODE (target), 3,
20703 force_reg (Pmode, XEXP (src1, 0)), Pmode,
20704 force_reg (Pmode, XEXP (src2, 0)), Pmode,
20705 len_rtx, GET_MODE (len_rtx));
20708 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
20709 rtx jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
20710 JUMP_LABEL (jmp) = final_label;
20711 LABEL_NUSES (final_label) += 1;
20712 emit_barrier ();
20715 if (cleanup_label)
20716 emit_label (cleanup_label);
20718 /* Generate the final sequence that identifies the differing
20719 byte and generates the final result, taking into account
20720 zero bytes:
20722 cmpb cmpb_result1, src1, src2
20723 cmpb cmpb_result2, src1, zero
20724 orc cmpb_result1, cmpb_result1, cmpb_result2
20725 cntlzd get bit of first zero/diff byte
20726 addi convert for rldcl use
20727 rldcl rldcl extract diff/zero byte
20728 subf subtract for final result  */
20731 rtx cmpb_diff = gen_reg_rtx (word_mode);
20732 rtx cmpb_zero = gen_reg_rtx (word_mode);
20733 rtx rot_amt = gen_reg_rtx (word_mode);
20734 rtx zero_reg = gen_reg_rtx (word_mode);
20736 rtx rot1_1 = gen_reg_rtx (word_mode);
20737 rtx rot1_2 = gen_reg_rtx (word_mode);
20738 rtx rot2_1 = gen_reg_rtx (word_mode);
20739 rtx rot2_2 = gen_reg_rtx (word_mode);
20741 if (word_mode == SImode)
20743 emit_insn (gen_cmpbsi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
20744 emit_insn (gen_movsi (zero_reg, GEN_INT (0)));
20745 emit_insn (gen_cmpbsi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20746 emit_insn (gen_one_cmplsi2 (cmpb_diff, cmpb_diff));
20747 emit_insn (gen_iorsi3 (cmpb_diff, cmpb_diff, cmpb_zero));
20748 emit_insn (gen_clzsi2 (rot_amt, cmpb_diff));
20749 emit_insn (gen_addsi3 (rot_amt, rot_amt, GEN_INT (8)));
20750 emit_insn (gen_rotlsi3 (rot1_1, tmp_reg_src1,
20751 gen_lowpart (SImode, rot_amt)));
20752 emit_insn (gen_andsi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
20753 emit_insn (gen_rotlsi3 (rot2_1, tmp_reg_src2,
20754 gen_lowpart (SImode, rot_amt)));
20755 emit_insn (gen_andsi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
20756 emit_insn (gen_subsi3 (result_reg, rot1_2, rot2_2));
20758 else
20760 emit_insn (gen_cmpbdi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
20761 emit_insn (gen_movdi (zero_reg, GEN_INT (0)));
20762 emit_insn (gen_cmpbdi3 (cmpb_zero, tmp_reg_src1, zero_reg));
20763 emit_insn (gen_one_cmpldi2 (cmpb_diff, cmpb_diff));
20764 emit_insn (gen_iordi3 (cmpb_diff, cmpb_diff, cmpb_zero));
20765 emit_insn (gen_clzdi2 (rot_amt, cmpb_diff));
20766 emit_insn (gen_adddi3 (rot_amt, rot_amt, GEN_INT (8)));
20767 emit_insn (gen_rotldi3 (rot1_1, tmp_reg_src1,
20768 gen_lowpart (SImode, rot_amt)));
20769 emit_insn (gen_anddi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
20770 emit_insn (gen_rotldi3 (rot2_1, tmp_reg_src2,
20771 gen_lowpart (SImode, rot_amt)));
20772 emit_insn (gen_anddi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
20773 emit_insn (gen_subdi3 (result_reg, rot1_2, rot2_2));
20776 emit_label (final_move_label);
20777 emit_insn (gen_movsi (target,
20778 gen_lowpart (SImode, result_reg)));
20779 emit_label (final_label);
20780 return true;
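/* Worked example of the cleanup sequence (64-bit, sketch): if the
   first difference is in byte 2 counting from the most significant
   byte, the cmpb/orc combination leaves 0xff in that byte, cntlzd
   returns 16, rot_amt becomes 24, and rotating each source left by
   24 bits brings the differing byte into the low 8 bits, where the
   final subf turns it into the signed result.  */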
20783 /* Expand a block move operation, and return 1 if successful. Return 0
20784 if we should let the compiler generate normal code.
20786 operands[0] is the destination
20787 operands[1] is the source
20788 operands[2] is the length
20789 operands[3] is the alignment */
20791 #define MAX_MOVE_REG 4
20793 int
20794 expand_block_move (rtx operands[])
20796 rtx orig_dest = operands[0];
20797 rtx orig_src = operands[1];
20798 rtx bytes_rtx = operands[2];
20799 rtx align_rtx = operands[3];
20800 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
20801 int align;
20802 int bytes;
20803 int offset;
20804 int move_bytes;
20805 rtx stores[MAX_MOVE_REG];
20806 int num_reg = 0;
20808 /* If this is not a fixed size move, just call memcpy */
20809 if (! constp)
20810 return 0;
20812 /* This must be a fixed size alignment */
20813 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
20814 align = INTVAL (align_rtx) * BITS_PER_UNIT;
20816 /* Anything to move? */
20817 bytes = INTVAL (bytes_rtx);
20818 if (bytes <= 0)
20819 return 1;
20821 if (bytes > rs6000_block_move_inline_limit)
20822 return 0;
20824 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
20826 union {
20827 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
20828 rtx (*mov) (rtx, rtx);
20829 } gen_func;
20830 machine_mode mode = BLKmode;
20831 rtx src, dest;
20833 /* Altivec first, since it will be faster than a string move
20834 when it applies, and usually not significantly larger. */
20835 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
20837 move_bytes = 16;
20838 mode = V4SImode;
20839 gen_func.mov = gen_movv4si;
20841 else if (TARGET_SPE && bytes >= 8 && align >= 64)
20843 move_bytes = 8;
20844 mode = V2SImode;
20845 gen_func.mov = gen_movv2si;
20847 else if (TARGET_STRING
20848 && bytes > 24 /* move up to 32 bytes at a time */
20849 && ! fixed_regs[5]
20850 && ! fixed_regs[6]
20851 && ! fixed_regs[7]
20852 && ! fixed_regs[8]
20853 && ! fixed_regs[9]
20854 && ! fixed_regs[10]
20855 && ! fixed_regs[11]
20856 && ! fixed_regs[12])
20858 move_bytes = (bytes > 32) ? 32 : bytes;
20859 gen_func.movmemsi = gen_movmemsi_8reg;
20861 else if (TARGET_STRING
20862 && bytes > 16 /* move up to 24 bytes at a time */
20863 && ! fixed_regs[5]
20864 && ! fixed_regs[6]
20865 && ! fixed_regs[7]
20866 && ! fixed_regs[8]
20867 && ! fixed_regs[9]
20868 && ! fixed_regs[10])
20870 move_bytes = (bytes > 24) ? 24 : bytes;
20871 gen_func.movmemsi = gen_movmemsi_6reg;
20873 else if (TARGET_STRING
20874 && bytes > 8 /* move up to 16 bytes at a time */
20875 && ! fixed_regs[5]
20876 && ! fixed_regs[6]
20877 && ! fixed_regs[7]
20878 && ! fixed_regs[8])
20880 move_bytes = (bytes > 16) ? 16 : bytes;
20881 gen_func.movmemsi = gen_movmemsi_4reg;
20883 else if (bytes >= 8 && TARGET_POWERPC64
20884 && (align >= 64 || !STRICT_ALIGNMENT))
20886 move_bytes = 8;
20887 mode = DImode;
20888 gen_func.mov = gen_movdi;
20889 if (offset == 0 && align < 64)
20891 rtx addr;
20893 /* If the address form is reg+offset with offset not a
20894 multiple of four, reload into reg indirect form here
20895 rather than waiting for reload. This way we get one
20896 reload, not one per load and/or store. */
20897 addr = XEXP (orig_dest, 0);
20898 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20899 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20900 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20902 addr = copy_addr_to_reg (addr);
20903 orig_dest = replace_equiv_address (orig_dest, addr);
20905 addr = XEXP (orig_src, 0);
20906 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20907 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20908 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20910 addr = copy_addr_to_reg (addr);
20911 orig_src = replace_equiv_address (orig_src, addr);
20915 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
20916 { /* move up to 8 bytes at a time */
20917 move_bytes = (bytes > 8) ? 8 : bytes;
20918 gen_func.movmemsi = gen_movmemsi_2reg;
20920 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
20921 { /* move 4 bytes */
20922 move_bytes = 4;
20923 mode = SImode;
20924 gen_func.mov = gen_movsi;
20926 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
20927 { /* move 2 bytes */
20928 move_bytes = 2;
20929 mode = HImode;
20930 gen_func.mov = gen_movhi;
20932 else if (TARGET_STRING && bytes > 1)
20933 { /* move up to 4 bytes at a time */
20934 move_bytes = (bytes > 4) ? 4 : bytes;
20935 gen_func.movmemsi = gen_movmemsi_1reg;
20937 else /* move 1 byte at a time */
20939 move_bytes = 1;
20940 mode = QImode;
20941 gen_func.mov = gen_movqi;
20944 src = adjust_address (orig_src, mode, offset);
20945 dest = adjust_address (orig_dest, mode, offset);
20947 if (mode != BLKmode)
20949 rtx tmp_reg = gen_reg_rtx (mode);
20951 emit_insn ((*gen_func.mov) (tmp_reg, src));
20952 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
20955 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
20957 int i;
20958 for (i = 0; i < num_reg; i++)
20959 emit_insn (stores[i]);
20960 num_reg = 0;
20963 if (mode == BLKmode)
20965 /* Move the address into scratch registers. The movmemsi
20966 patterns require zero offset. */
20967 if (!REG_P (XEXP (src, 0)))
20969 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
20970 src = replace_equiv_address (src, src_reg);
20972 set_mem_size (src, move_bytes);
20974 if (!REG_P (XEXP (dest, 0)))
20976 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
20977 dest = replace_equiv_address (dest, dest_reg);
20979 set_mem_size (dest, move_bytes);
20981 emit_insn ((*gen_func.movmemsi) (dest, src,
20982 GEN_INT (move_bytes & 31),
20983 align_rtx));
20987 return 1;
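/* Note on the batching above: loads are emitted immediately, but the
   matching stores are queued in STORES[] and flushed only every
   MAX_MOVE_REG iterations, at the end, or before a BLKmode string
   op, so up to four loads can be in flight before the first store
   forces them to complete.  */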
20991 /* Return a string to perform a load_multiple operation.
20992 operands[0] is the vector.
20993 operands[1] is the source address.
20994 operands[2] is the first destination register. */
20996 const char *
20997 rs6000_output_load_multiple (rtx operands[3])
20999 /* We have to handle the case where the pseudo used to contain the address
21000 is assigned to one of the output registers. */
21001 int i, j;
21002 int words = XVECLEN (operands[0], 0);
21003 rtx xop[10];
21005 if (XVECLEN (operands[0], 0) == 1)
21006 return "lwz %2,0(%1)";
21008 for (i = 0; i < words; i++)
21009 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
21011 if (i == words - 1)
21013 xop[0] = GEN_INT (4 * (words - 1));
21014 xop[1] = operands[1];
21015 xop[2] = operands[2];
21016 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
21017 return "";
21019 else if (i == 0)
21021 xop[0] = GEN_INT (4 * (words - 1));
21022 xop[1] = operands[1];
21023 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
21024 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
21025 return "";
21027 else
21029 for (j = 0; j < words; j++)
21030 if (j != i)
21032 xop[0] = GEN_INT (j * 4);
21033 xop[1] = operands[1];
21034 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
21035 output_asm_insn ("lwz %2,%0(%1)", xop);
21037 xop[0] = GEN_INT (i * 4);
21038 xop[1] = operands[1];
21039 output_asm_insn ("lwz %1,%0(%1)", xop);
21040 return "";
21044 return "lswi %2,%1,%N0";
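/* Example (sketch): a 3-word load into r5..r7 where the address
   lives in r6, the middle destination, cannot use lswi directly, so
   the loop above loads r5 and r7 with lwz first and overwrites the
   address register last:
     lwz r5,0(r6) ; lwz r7,8(r6) ; lwz r6,4(r6)  */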
21048 /* A validation routine: say whether CODE, a condition code, and MODE
21049 match. The other alternatives either don't make sense or should
21050 never be generated. */
21052 void
21053 validate_condition_mode (enum rtx_code code, machine_mode mode)
21055 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
21056 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
21057 && GET_MODE_CLASS (mode) == MODE_CC);
21059 /* These don't make sense. */
21060 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
21061 || mode != CCUNSmode);
21063 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
21064 || mode == CCUNSmode);
21066 gcc_assert (mode == CCFPmode
21067 || (code != ORDERED && code != UNORDERED
21068 && code != UNEQ && code != LTGT
21069 && code != UNGT && code != UNLT
21070 && code != UNGE && code != UNLE));
21072 /* These should never be generated except for
21073 flag_finite_math_only. */
21074 gcc_assert (mode != CCFPmode
21075 || flag_finite_math_only
21076 || (code != LE && code != GE
21077 && code != UNEQ && code != LTGT
21078 && code != UNGT && code != UNLT));
21080 /* These are invalid; the information is not there. */
21081 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
21085 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
21086 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
21087 not zero, store there the bit offset (counted from the right) where
21088 the single stretch of 1 bits begins; and similarly for B, the bit
21089 offset where it ends. */
21091 bool
21092 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
21094 unsigned HOST_WIDE_INT val = INTVAL (mask);
21095 unsigned HOST_WIDE_INT bit;
21096 int nb, ne;
21097 int n = GET_MODE_PRECISION (mode);
21099 if (mode != DImode && mode != SImode)
21100 return false;
21102 if (INTVAL (mask) >= 0)
21104 bit = val & -val;
21105 ne = exact_log2 (bit);
21106 nb = exact_log2 (val + bit);
21108 else if (val + 1 == 0)
21110 nb = n;
21111 ne = 0;
21113 else if (val & 1)
21115 val = ~val;
21116 bit = val & -val;
21117 nb = exact_log2 (bit);
21118 ne = exact_log2 (val + bit);
21120 else
21122 bit = val & -val;
21123 ne = exact_log2 (bit);
21124 if (val + bit == 0)
21125 nb = n;
21126 else
21127 nb = 0;
21130 nb--;
21132 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
21133 return false;
21135 if (b)
21136 *b = nb;
21137 if (e)
21138 *e = ne;
21140 return true;
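/* Worked example: MASK == 0x00ffff00 in SImode is a single stretch
   of 1 bits; val & -val gives 0x100, so *e == 8, and exact_log2
   (val + bit) == 24 gives *b == 23 after the final decrement.  For
   a two-stretch mask like 0x00ff00ff, exact_log2 (val + bit) fails,
   nb ends up negative, and the function returns false.  */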
21143 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
21144 or rldicr instruction, to implement an AND with it in mode MODE. */
21146 bool
21147 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
21149 int nb, ne;
21151 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21152 return false;
21154 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
21155 does not wrap. */
21156 if (mode == DImode)
21157 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
21159 /* For SImode, rlwinm can do everything. */
21160 if (mode == SImode)
21161 return (nb < 32 && ne < 32);
21163 return false;
21166 /* Return the instruction template for an AND with mask in mode MODE, with
21167 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21169 const char *
21170 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
21172 int nb, ne;
21174 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
21175 gcc_unreachable ();
21177 if (mode == DImode && ne == 0)
21179 operands[3] = GEN_INT (63 - nb);
21180 if (dot)
21181 return "rldicl. %0,%1,0,%3";
21182 return "rldicl %0,%1,0,%3";
21185 if (mode == DImode && nb == 63)
21187 operands[3] = GEN_INT (63 - ne);
21188 if (dot)
21189 return "rldicr. %0,%1,0,%3";
21190 return "rldicr %0,%1,0,%3";
21193 if (nb < 32 && ne < 32)
21195 operands[3] = GEN_INT (31 - nb);
21196 operands[4] = GEN_INT (31 - ne);
21197 if (dot)
21198 return "rlwinm. %0,%1,0,%3,%4";
21199 return "rlwinm %0,%1,0,%3,%4";
21202 gcc_unreachable ();
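/* For instance, an AND with 0xff in DImode has ne == 0 and nb == 7,
   so the first case above emits "rldicl %0,%1,0,56": rotate by zero
   and clear the leftmost 56 bits, which implements exactly that
   mask.  */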
21205 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
21206 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
21207 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
21209 bool
21210 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
21212 int nb, ne;
21214 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21215 return false;
21217 int n = GET_MODE_PRECISION (mode);
21218 int sh = -1;
21220 if (CONST_INT_P (XEXP (shift, 1)))
21222 sh = INTVAL (XEXP (shift, 1));
21223 if (sh < 0 || sh >= n)
21224 return false;
21227 rtx_code code = GET_CODE (shift);
21229 /* Convert any shift by 0 to a rotate, to simplify the code below. */
21230 if (sh == 0)
21231 code = ROTATE;
21233 /* Convert rotate to simple shift if we can, to make analysis simpler. */
21234 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
21235 code = ASHIFT;
21236 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
21238 code = LSHIFTRT;
21239 sh = n - sh;
21242 /* DImode rotates need rld*. */
21243 if (mode == DImode && code == ROTATE)
21244 return (nb == 63 || ne == 0 || ne == sh);
21246 /* SImode rotates need rlw*. */
21247 if (mode == SImode && code == ROTATE)
21248 return (nb < 32 && ne < 32 && sh < 32);
21250 /* Wrap-around masks are only okay for rotates. */
21251 if (ne > nb)
21252 return false;
21254 /* Variable shifts are only okay for rotates. */
21255 if (sh < 0)
21256 return false;
21258 /* Don't allow ASHIFT if the mask is wrong for that. */
21259 if (code == ASHIFT && ne < sh)
21260 return false;
21262 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
21263 if the mask is wrong for that. */
21264 if (nb < 32 && ne < 32 && sh < 32
21265 && !(code == LSHIFTRT && nb >= 32 - sh))
21266 return true;
21268 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
21269 if the mask is wrong for that. */
21270 if (code == LSHIFTRT)
21271 sh = 64 - sh;
21272 if (nb == 63 || ne == 0 || ne == sh)
21273 return !(code == LSHIFTRT && nb >= sh);
21275 return false;
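/* Worked example (illustrative): (ashift:SI x 8) under the mask
   0xffffff00 gives sh = 8, nb = 31, ne = 8.  The code stays ASHIFT
   (the rotate conversions apply only to ROTATE), the ASHIFT check
   does not reject it since ne == sh, and nb, ne and sh are all below
   32, so the rlw* test above accepts it.  */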
21278 /* Return the instruction template for a shift with mask in mode MODE, with
21279 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21281 const char *
21282 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
21284 int nb, ne;
21286 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
21287 gcc_unreachable ();
21289 if (mode == DImode && ne == 0)
21291 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21292 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
21293 operands[3] = GEN_INT (63 - nb);
21294 if (dot)
21295 return "rld%I2cl. %0,%1,%2,%3";
21296 return "rld%I2cl %0,%1,%2,%3";
21299 if (mode == DImode && nb == 63)
21301 operands[3] = GEN_INT (63 - ne);
21302 if (dot)
21303 return "rld%I2cr. %0,%1,%2,%3";
21304 return "rld%I2cr %0,%1,%2,%3";
21307 if (mode == DImode
21308 && GET_CODE (operands[4]) != LSHIFTRT
21309 && CONST_INT_P (operands[2])
21310 && ne == INTVAL (operands[2]))
21312 operands[3] = GEN_INT (63 - nb);
21313 if (dot)
21314 return "rld%I2c. %0,%1,%2,%3";
21315 return "rld%I2c %0,%1,%2,%3";
21318 if (nb < 32 && ne < 32)
21320 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21321 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
21322 operands[3] = GEN_INT (31 - nb);
21323 operands[4] = GEN_INT (31 - ne);
21324 /* This insn can also be a 64-bit rotate with mask that really makes
21325 it just a shift right (with mask); the %h below are to adjust for
21326 that situation (shift count is >= 32 in that case). */
21327 if (dot)
21328 return "rlw%I2nm. %0,%1,%h2,%3,%4";
21329 return "rlw%I2nm %0,%1,%h2,%3,%4";
21332 gcc_unreachable ();
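/* For the SImode example above (illustrative): with operands[2] = 8 and
   mask 0xffffff00, the rlwinm arm sets operands[3] = 31 - 31 = 0 and
   operands[4] = 31 - 8 = 23, printing "rlwinm %0,%1,8,0,23", i.e.
   rotate left by 8 and keep bits 8..31.  */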
21335 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
21336 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
21337 ASHIFT, or LSHIFTRT) in mode MODE. */
21339 bool
21340 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
21342 int nb, ne;
21344 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
21345 return false;
21347 int n = GET_MODE_PRECISION (mode);
21349 int sh = INTVAL (XEXP (shift, 1));
21350 if (sh < 0 || sh >= n)
21351 return false;
21353 rtx_code code = GET_CODE (shift);
21355 /* Convert any shift by 0 to a rotate, to simplify below code. */
21356 if (sh == 0)
21357 code = ROTATE;
21359 /* Convert rotate to simple shift if we can, to make analysis simpler. */
21360 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
21361 code = ASHIFT;
21362 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
21364 code = LSHIFTRT;
21365 sh = n - sh;
21368 /* DImode rotates need rldimi. */
21369 if (mode == DImode && code == ROTATE)
21370 return (ne == sh);
21372 /* SImode rotates need rlwimi. */
21373 if (mode == SImode && code == ROTATE)
21374 return (nb < 32 && ne < 32 && sh < 32);
21376 /* Wrap-around masks are only okay for rotates. */
21377 if (ne > nb)
21378 return false;
21380 /* Don't allow ASHIFT if the mask is wrong for that. */
21381 if (code == ASHIFT && ne < sh)
21382 return false;
21384 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
21385 if the mask is wrong for that. */
21386 if (nb < 32 && ne < 32 && sh < 32
21387 && !(code == LSHIFTRT && nb >= 32 - sh))
21388 return true;
21390 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
21391 if the mask is wrong for that. */
21392 if (code == LSHIFTRT)
21393 sh = 64 - sh;
21394 if (ne == sh)
21395 return !(code == LSHIFTRT && nb >= sh);
21397 return false;
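/* Example (illustrative): depositing a 16-bit field at bit 8, i.e.
   (ashift:SI x 8) under mask 0x00ffff00 (nb = 23, ne = 8, sh = 8),
   passes every check above: the mask does not wrap, ne >= sh, and
   nb, ne and sh are all below 32, so an rlwimi (or rldimi) can
   implement the insert.  */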
21400 /* Return the instruction template for an insert with mask in mode MODE, with
21401 operands OPERANDS. If DOT is true, make it a record-form instruction. */
21403 const char *
21404 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
21406 int nb, ne;
21408 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
21409 gcc_unreachable ();
21411 /* Prefer rldimi because rlwimi is cracked. */
21412 if (TARGET_POWERPC64
21413 && (!dot || mode == DImode)
21414 && GET_CODE (operands[4]) != LSHIFTRT
21415 && ne == INTVAL (operands[2]))
21417 operands[3] = GEN_INT (63 - nb);
21418 if (dot)
21419 return "rldimi. %0,%1,%2,%3";
21420 return "rldimi %0,%1,%2,%3";
21423 if (nb < 32 && ne < 32)
21425 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
21426 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
21427 operands[3] = GEN_INT (31 - nb);
21428 operands[4] = GEN_INT (31 - ne);
21429 if (dot)
21430 return "rlwimi. %0,%1,%2,%3,%4";
21431 return "rlwimi %0,%1,%2,%3,%4";
21434 gcc_unreachable ();
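/* For the example above (illustrative): on a 32-bit target (where the
   rldimi preference does not apply), mask 0x00ffff00 with a shift of 8
   gives operands[3] = 31 - 23 = 8 and operands[4] = 31 - 8 = 23, so
   the insn printed is "rlwimi %0,%1,8,8,23".  */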
21437 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
21438 using two machine instructions. */
21440 bool
21441 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
21443 /* There are two kinds of AND we can handle with two insns:
21444 1) those we can do with two rl* insns;
21445 2) ori[s];xori[s].
21447 We do not handle that last case yet. */
21449 /* If there is just one stretch of ones, we can do it. */
21450 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
21451 return true;
21453 /* Otherwise, fill in the lowest "hole"; if we can do the result with
21454 one insn, we can do the whole thing with two. */
21455 unsigned HOST_WIDE_INT val = INTVAL (c);
21456 unsigned HOST_WIDE_INT bit1 = val & -val;
21457 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
21458 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
21459 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
21460 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
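/* Worked example of the hole-filling trick (illustrative): for
   val = 0xff00ff00, bit1 = 0x100 (lowest set bit), bit2 = 0x10000
   (lowest bit of the lowest hole), val1 = 0xff000000 (val with its
   lowest run of ones cleared) and bit3 = 0x1000000 (lowest bit of the
   next run).  val + bit3 - bit2 = 0xffffff00 fills in the hole,
   leaving a single run that rs6000_is_valid_and_mask accepts.  */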
21463 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
21464 If EXPAND is true, split rotate-and-mask instructions we generate to
21465 their constituent parts as well (this is used during expand); if DOT
21466 is 1, make the last insn a record-form instruction clobbering the
21467 destination GPR and setting the CC reg (from operands[3]); if 2, set
21468 that GPR as well as the CC reg. */
21470 void
21471 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
21473 gcc_assert (!(expand && dot));
21475 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
21477 /* If it is one stretch of ones, it is DImode; shift left, mask, then
21478 shift right. This generates better code than doing the masks without
21479 shifts, or shifting first right and then left. */
21480 int nb, ne;
21481 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
21483 gcc_assert (mode == DImode);
21485 int shift = 63 - nb;
21486 if (expand)
21488 rtx tmp1 = gen_reg_rtx (DImode);
21489 rtx tmp2 = gen_reg_rtx (DImode);
21490 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
21491 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
21492 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
21494 else
21496 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
21497 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
21498 emit_move_insn (operands[0], tmp);
21499 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
21500 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21502 return;
21505 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
21506 that does the rest. */
21507 unsigned HOST_WIDE_INT bit1 = val & -val;
21508 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
21509 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
21510 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
21512 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
21513 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
21515 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
21517 /* Two "no-rotate"-and-mask instructions, for SImode. */
21518 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
21520 gcc_assert (mode == SImode);
21522 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
21523 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
21524 emit_move_insn (reg, tmp);
21525 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
21526 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21527 return;
21530 gcc_assert (mode == DImode);
21532 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
21533 insns; we have to do the first in SImode, because it wraps. */
21534 if (mask2 <= 0xffffffff
21535 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
21537 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
21538 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
21539 GEN_INT (mask1));
21540 rtx reg_low = gen_lowpart (SImode, reg);
21541 emit_move_insn (reg_low, tmp);
21542 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
21543 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
21544 return;
21547 /* Two rld* insns: rotate, clear the hole in the middle (which now is
21548 at the top end), rotate back and clear the other hole. */
21549 int right = exact_log2 (bit3);
21550 int left = 64 - right;
21552 /* Rotate the mask too. */
21553 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
21555 if (expand)
21557 rtx tmp1 = gen_reg_rtx (DImode);
21558 rtx tmp2 = gen_reg_rtx (DImode);
21559 rtx tmp3 = gen_reg_rtx (DImode);
21560 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
21561 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
21562 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
21563 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
21565 else
21567 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
21568 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
21569 emit_move_insn (operands[0], tmp);
21570 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
21571 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
21572 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
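/* Worked example (illustrative): AND with 0xff00ff00 in DImode computes
   mask1 = 0xffffffffff00ffff and mask2 = 0x00000000ffffff00, whose
   intersection is the original value.  mask2 fits in 32 bits and mask1
   is a valid (wrapping) rlwinm mask in SImode, so the two-rlwinm
   DImode branch above fires rather than the final rotate, clear,
   rotate-back sequence.  */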
21576 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
21577 for lfq and stfq insns iff the registers are hard registers. */
21579 int
21580 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
21582 /* We might have been passed a SUBREG. */
21583 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
21584 return 0;
21586 /* We might have been passed non floating point registers. */
21587 if (!FP_REGNO_P (REGNO (reg1))
21588 || !FP_REGNO_P (REGNO (reg2)))
21589 return 0;
21591 return (REGNO (reg1) == REGNO (reg2) - 1);
21594 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
21595 addr1 and addr2 must be in consecutive memory locations
21596 (addr2 == addr1 + 8). */
21598 int
21599 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
21601 rtx addr1, addr2;
21602 unsigned int reg1, reg2;
21603 int offset1, offset2;
21605 /* The mems cannot be volatile. */
21606 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
21607 return 0;
21609 addr1 = XEXP (mem1, 0);
21610 addr2 = XEXP (mem2, 0);
21612 /* Extract an offset (if used) from the first addr. */
21613 if (GET_CODE (addr1) == PLUS)
21615 /* If not a REG, return zero. */
21616 if (GET_CODE (XEXP (addr1, 0)) != REG)
21617 return 0;
21618 else
21620 reg1 = REGNO (XEXP (addr1, 0));
21621 /* The offset must be constant! */
21622 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
21623 return 0;
21624 offset1 = INTVAL (XEXP (addr1, 1));
21627 else if (GET_CODE (addr1) != REG)
21628 return 0;
21629 else
21631 reg1 = REGNO (addr1);
21632 /* This was a simple (mem (reg)) expression. Offset is 0. */
21633 offset1 = 0;
21636 /* And now for the second addr. */
21637 if (GET_CODE (addr2) == PLUS)
21639 /* If not a REG, return zero. */
21640 if (GET_CODE (XEXP (addr2, 0)) != REG)
21641 return 0;
21642 else
21644 reg2 = REGNO (XEXP (addr2, 0));
21645 /* The offset must be constant. */
21646 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
21647 return 0;
21648 offset2 = INTVAL (XEXP (addr2, 1));
21651 else if (GET_CODE (addr2) != REG)
21652 return 0;
21653 else
21655 reg2 = REGNO (addr2);
21656 /* This was a simple (mem (reg)) expression. Offset is 0. */
21657 offset2 = 0;
21660 /* Both of these must have the same base register. */
21661 if (reg1 != reg2)
21662 return 0;
21664 /* The offset for the second addr must be 8 more than the first addr. */
21665 if (offset2 != offset1 + 8)
21666 return 0;
21668 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
21669 instructions. */
21670 return 1;
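/* Example (illustrative, register numbers hypothetical): the quad
   peepholes can combine accesses to (mem (reg r9)) and
   (mem (plus (reg r9) (const_int 8))): same base register, offsets 0
   and 8, with registers_ok_for_quad_peep checking that the two FP
   destinations are consecutive hard registers.  */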
21674 rtx
21675 rs6000_secondary_memory_needed_rtx (machine_mode mode)
21677 static bool eliminated = false;
21678 rtx ret;
21680 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
21681 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
21682 else
21684 rtx mem = cfun->machine->sdmode_stack_slot;
21685 gcc_assert (mem != NULL_RTX);
21687 if (!eliminated)
21689 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
21690 cfun->machine->sdmode_stack_slot = mem;
21691 eliminated = true;
21693 ret = mem;
21696 if (TARGET_DEBUG_ADDR)
21698 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
21699 GET_MODE_NAME (mode));
21700 if (!ret)
21701 fprintf (stderr, "\tNULL_RTX\n");
21702 else
21703 debug_rtx (ret);
21706 return ret;
21709 /* Return the mode to be used for memory when a secondary memory
21710 location is needed. For SDmode values we need to use DDmode, in
21711 all other cases we can use the same mode. */
21712 machine_mode
21713 rs6000_secondary_memory_needed_mode (machine_mode mode)
21715 if (lra_in_progress && mode == SDmode)
21716 return DDmode;
21717 return mode;
21720 static tree
21721 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
21723 /* Don't walk into types. */
21724 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
21726 *walk_subtrees = 0;
21727 return NULL_TREE;
21730 switch (TREE_CODE (*tp))
21732 case VAR_DECL:
21733 case PARM_DECL:
21734 case FIELD_DECL:
21735 case RESULT_DECL:
21736 case SSA_NAME:
21737 case REAL_CST:
21738 case MEM_REF:
21739 case VIEW_CONVERT_EXPR:
21740 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
21741 return *tp;
21742 break;
21743 default:
21744 break;
21747 return NULL_TREE;
21750 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
21751 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
21752 only work on the traditional altivec registers, note if an altivec register
21753 was chosen. */
21755 static enum rs6000_reg_type
21756 register_to_reg_type (rtx reg, bool *is_altivec)
21758 HOST_WIDE_INT regno;
21759 enum reg_class rclass;
21761 if (GET_CODE (reg) == SUBREG)
21762 reg = SUBREG_REG (reg);
21764 if (!REG_P (reg))
21765 return NO_REG_TYPE;
21767 regno = REGNO (reg);
21768 if (regno >= FIRST_PSEUDO_REGISTER)
21770 if (!lra_in_progress && !reload_in_progress && !reload_completed)
21771 return PSEUDO_REG_TYPE;
21773 regno = true_regnum (reg);
21774 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
21775 return PSEUDO_REG_TYPE;
21778 gcc_assert (regno >= 0);
21780 if (is_altivec && ALTIVEC_REGNO_P (regno))
21781 *is_altivec = true;
21783 rclass = rs6000_regno_regclass[regno];
21784 return reg_class_to_reg_type[(int)rclass];
21787 /* Helper function to return the cost of adding a TOC entry address. */
21789 static inline int
21790 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
21792 int ret;
21794 if (TARGET_CMODEL != CMODEL_SMALL)
21795 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
21797 else
21798 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
21800 return ret;
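/* For instance (illustrative): with a medium or large code model, a TOC
   entry whose register class allows offset addressing is charged 2
   extra insns (the addis/addi style sequence), while the small code
   model with -mminimal-toc is charged 6 to strongly discourage such
   reloads.  */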
21803 /* Helper function for rs6000_secondary_reload to determine whether the memory
21804 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
21805 needs reloading. Return negative if the memory is not handled by the memory
21806 helper functions and to try a different reload method, 0 if no additional
21807 instructions are needed, and positive to give the extra cost for the
21808 memory. */
21810 static int
21811 rs6000_secondary_reload_memory (rtx addr,
21812 enum reg_class rclass,
21813 machine_mode mode)
21815 int extra_cost = 0;
21816 rtx reg, and_arg, plus_arg0, plus_arg1;
21817 addr_mask_type addr_mask;
21818 const char *type = NULL;
21819 const char *fail_msg = NULL;
21821 if (GPR_REG_CLASS_P (rclass))
21822 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
21824 else if (rclass == FLOAT_REGS)
21825 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
21827 else if (rclass == ALTIVEC_REGS)
21828 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
21830 /* For the combined VSX_REGS, turn off Altivec AND -16. */
21831 else if (rclass == VSX_REGS)
21832 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
21833 & ~RELOAD_REG_AND_M16);
21835 /* If the register allocator hasn't made up its mind yet on the register
21836 class to use, settle on defaults to use. */
21837 else if (rclass == NO_REGS)
21839 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
21840 & ~RELOAD_REG_AND_M16);
21842 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
21843 addr_mask &= ~(RELOAD_REG_INDEXED
21844 | RELOAD_REG_PRE_INCDEC
21845 | RELOAD_REG_PRE_MODIFY);
21848 else
21849 addr_mask = 0;
21851 /* If the register isn't valid in this register class, just return now. */
21852 if ((addr_mask & RELOAD_REG_VALID) == 0)
21854 if (TARGET_DEBUG_ADDR)
21856 fprintf (stderr,
21857 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
21858 "not valid in class\n",
21859 GET_MODE_NAME (mode), reg_class_names[rclass]);
21860 debug_rtx (addr);
21863 return -1;
21866 switch (GET_CODE (addr))
21868 /* Does the register class support auto update forms for this mode? We
21869 don't need a scratch register, since the powerpc only supports
21870 PRE_INC, PRE_DEC, and PRE_MODIFY. */
21871 case PRE_INC:
21872 case PRE_DEC:
21873 reg = XEXP (addr, 0);
21874 if (!base_reg_operand (reg, GET_MODE (reg)))
21876 fail_msg = "no base register #1";
21877 extra_cost = -1;
21880 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
21882 extra_cost = 1;
21883 type = "update";
21885 break;
21887 case PRE_MODIFY:
21888 reg = XEXP (addr, 0);
21889 plus_arg1 = XEXP (addr, 1);
21890 if (!base_reg_operand (reg, GET_MODE (reg))
21891 || GET_CODE (plus_arg1) != PLUS
21892 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
21894 fail_msg = "bad PRE_MODIFY";
21895 extra_cost = -1;
21898 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
21900 extra_cost = 1;
21901 type = "update";
21903 break;
21905 /* Do we need to simulate AND -16 to clear the bottom address bits used
21906 in VMX load/stores? Only allow the AND for vector sizes. */
21907 case AND:
21908 and_arg = XEXP (addr, 0);
21909 if (GET_MODE_SIZE (mode) != 16
21910 || GET_CODE (XEXP (addr, 1)) != CONST_INT
21911 || INTVAL (XEXP (addr, 1)) != -16)
21913 fail_msg = "bad Altivec AND #1";
21914 extra_cost = -1;
21917 if (rclass != ALTIVEC_REGS)
21919 if (legitimate_indirect_address_p (and_arg, false))
21920 extra_cost = 1;
21922 else if (legitimate_indexed_address_p (and_arg, false))
21923 extra_cost = 2;
21925 else
21927 fail_msg = "bad Altivec AND #2";
21928 extra_cost = -1;
21931 type = "and";
21933 break;
21935 /* If this is an indirect address, make sure it is a base register. */
21936 case REG:
21937 case SUBREG:
21938 if (!legitimate_indirect_address_p (addr, false))
21940 extra_cost = 1;
21941 type = "move";
21943 break;
21945 /* If this is an indexed address, make sure the register class can handle
21946 indexed addresses for this mode. */
21947 case PLUS:
21948 plus_arg0 = XEXP (addr, 0);
21949 plus_arg1 = XEXP (addr, 1);
21951 /* (plus (plus (reg) (constant)) (constant)) is generated during
21952 push_reload processing, so handle it now. */
21953 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
21955 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21957 extra_cost = 1;
21958 type = "offset";
21962 /* (plus (plus (reg) (constant)) (reg)) is also generated during
21963 push_reload processing, so handle it now. */
21964 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
21966 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
21968 extra_cost = 1;
21969 type = "indexed #2";
21973 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
21975 fail_msg = "no base register #2";
21976 extra_cost = -1;
21979 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
21981 if ((addr_mask & RELOAD_REG_INDEXED) == 0
21982 || !legitimate_indexed_address_p (addr, false))
21984 extra_cost = 1;
21985 type = "indexed";
21989 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
21990 && CONST_INT_P (plus_arg1))
21992 if (!quad_address_offset_p (INTVAL (plus_arg1)))
21994 extra_cost = 1;
21995 type = "vector d-form offset";
21999 /* Make sure the register class can handle offset addresses. */
22000 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
22002 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22004 extra_cost = 1;
22005 type = "offset #2";
22009 else
22011 fail_msg = "bad PLUS";
22012 extra_cost = -1;
22015 break;
22017 case LO_SUM:
22018 /* Quad offsets are restricted and can't handle normal addresses. */
22019 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22021 extra_cost = -1;
22022 type = "vector d-form lo_sum";
22025 else if (!legitimate_lo_sum_address_p (mode, addr, false))
22027 fail_msg = "bad LO_SUM";
22028 extra_cost = -1;
22031 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22033 extra_cost = 1;
22034 type = "lo_sum";
22036 break;
22038 /* Static addresses need to create a TOC entry. */
22039 case CONST:
22040 case SYMBOL_REF:
22041 case LABEL_REF:
22042 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22044 extra_cost = -1;
22045 type = "vector d-form lo_sum #2";
22048 else
22050 type = "address";
22051 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
22053 break;
22055 /* TOC references look like offsetable memory. */
22056 case UNSPEC:
22057 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
22059 fail_msg = "bad UNSPEC";
22060 extra_cost = -1;
22063 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
22065 extra_cost = -1;
22066 type = "vector d-form lo_sum #3";
22069 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22071 extra_cost = 1;
22072 type = "toc reference";
22074 break;
22076 default:
22078 fail_msg = "bad address";
22079 extra_cost = -1;
22083 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
22085 if (extra_cost < 0)
22086 fprintf (stderr,
22087 "rs6000_secondary_reload_memory error: mode = %s, "
22088 "class = %s, addr_mask = '%s', %s\n",
22089 GET_MODE_NAME (mode),
22090 reg_class_names[rclass],
22091 rs6000_debug_addr_mask (addr_mask, false),
22092 (fail_msg != NULL) ? fail_msg : "<bad address>");
22094 else
22095 fprintf (stderr,
22096 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
22097 "addr_mask = '%s', extra cost = %d, %s\n",
22098 GET_MODE_NAME (mode),
22099 reg_class_names[rclass],
22100 rs6000_debug_addr_mask (addr_mask, false),
22101 extra_cost,
22102 (type) ? type : "<none>");
22104 debug_rtx (addr);
22107 return extra_cost;
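/* Example (illustrative, register numbers hypothetical): an
   Altivec-style address (and (plus (reg r9) (reg r10)) (const_int -16))
   for a 16-byte load into FLOAT_REGS (which lacks the AND facility)
   lands in the AND case above; the inner PLUS is a valid indexed
   address, so the cost is 2: one insn to form the sum and one for the
   masking AND.  */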
22110 /* Helper function for rs6000_secondary_reload to return true if a move to a
22111 different register class is really a simple move. */
22113 static bool
22114 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
22115 enum rs6000_reg_type from_type,
22116 machine_mode mode)
22118 int size = GET_MODE_SIZE (mode);
22120 /* Add support for various direct moves available. In this function, we only
22121 look at cases where we don't need any extra registers, and one or more
22122 simple move insns are issued. Originally small integers are not allowed
22123 in FPR/VSX registers. Single precision binary floating point is not a
22124 simple move because we need to convert to the single precision memory layout.
22125 The 4-byte SDmode can be moved. TDmode values are disallowed since they
22126 need special direct move handling, which we do not support yet. */
22127 if (TARGET_DIRECT_MOVE
22128 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22129 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
22131 if (TARGET_POWERPC64)
22133 /* ISA 2.07: MTVSRD or MVFVSRD. */
22134 if (size == 8)
22135 return true;
22137 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
22138 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
22139 return true;
22142 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
22143 if (TARGET_VSX_SMALL_INTEGER)
22145 if (mode == SImode)
22146 return true;
22148 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
22149 return true;
22152 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
22153 if (mode == SDmode)
22154 return true;
22157 /* Power6+: MFTGPR or MFFGPR. */
22158 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
22159 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
22160 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
22161 return true;
22163 /* Move to/from SPR. */
22164 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
22165 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
22166 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
22167 return true;
22169 return false;
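/* Example (illustrative): on a 64-bit ISA 2.07 (power8) target, a
   DImode move from a GPR to a VSX register is a single mtvsrd, so the
   size == 8 test above returns true and no extra registers are needed;
   a 16-byte TImode move on the same target instead needs the
   multi-insn path in rs6000_secondary_reload_direct_move below.  */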
22172 /* Direct move helper function for rs6000_secondary_reload, handle all of the
22173 special direct moves that involve allocating an extra register. Return
22174 true and set SRI->icode to the insn code of the helper function if there
22175 is such a function, or false if not. */
22177 static bool
22178 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
22179 enum rs6000_reg_type from_type,
22180 machine_mode mode,
22181 secondary_reload_info *sri,
22182 bool altivec_p)
22184 bool ret = false;
22185 enum insn_code icode = CODE_FOR_nothing;
22186 int cost = 0;
22187 int size = GET_MODE_SIZE (mode);
22189 if (TARGET_POWERPC64 && size == 16)
22191 /* Handle moving 128-bit values from GPRs to VSX registers on
22192 ISA 2.07 (power8, power9) when running in 64-bit mode using
22193 XXPERMDI to glue the two 64-bit values back together. */
22194 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
22196 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
22197 icode = reg_addr[mode].reload_vsx_gpr;
22200 /* Handle moving 128-bit values from VSX registers to GPRs on
22201 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
22202 bottom 64-bit value. */
22203 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22205 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
22206 icode = reg_addr[mode].reload_gpr_vsx;
22210 else if (TARGET_POWERPC64 && mode == SFmode)
22212 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
22214 cost = 3; /* xscvdpspn, mfvsrd, and. */
22215 icode = reg_addr[mode].reload_gpr_vsx;
22218 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
22220 cost = 2; /* mtvsrz, xscvspdpn. */
22221 icode = reg_addr[mode].reload_vsx_gpr;
22225 else if (!TARGET_POWERPC64 && size == 8)
22227 /* Handle moving 64-bit values from GPRs to floating point registers on
22228 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
22229 32-bit values back together. Altivec register classes must be handled
22230 specially since a different instruction is used, and the secondary
22231 reload support requires a single instruction class in the scratch
22232 register constraint. However, right now TFmode is not allowed in
22233 Altivec registers, so the pattern will never match. */
22234 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
22236 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
22237 icode = reg_addr[mode].reload_fpr_gpr;
22241 if (icode != CODE_FOR_nothing)
22243 ret = true;
22244 if (sri)
22246 sri->icode = icode;
22247 sri->extra_cost = cost;
22251 return ret;
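/* For instance (illustrative): the TImode GPR-to-VSX move mentioned
   above sets SRI->icode to the mode's reload_vsx_gpr pattern and
   SRI->extra_cost to 3, matching the two mtvsrd's plus the xxpermdi
   that glues the two 64-bit halves together.  */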
22254 /* Return whether a move between two register classes can be done either
22255 directly (simple move) or via a pattern that uses a single extra temporary
22256 (using ISA 2.07's direct move in this case). */
22258 static bool
22259 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
22260 enum rs6000_reg_type from_type,
22261 machine_mode mode,
22262 secondary_reload_info *sri,
22263 bool altivec_p)
22265 /* Fall back to load/store reloads if either type is not a register. */
22266 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
22267 return false;
22269 /* If we haven't allocated registers yet, assume the move can be done for the
22270 standard register types. */
22271 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
22272 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
22273 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
22274 return true;
22276 /* A move within the same set of registers is a simple move for
22277 non-specialized registers. */
22278 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
22279 return true;
22281 /* Check whether a simple move can be done directly. */
22282 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
22284 if (sri)
22286 sri->icode = CODE_FOR_nothing;
22287 sri->extra_cost = 0;
22289 return true;
22292 /* Now check if we can do it in a few steps. */
22293 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
22294 altivec_p);
22297 /* Inform reload about cases where moving X with a mode MODE to a register in
22298 RCLASS requires an extra scratch or immediate register. Return the class
22299 needed for the immediate register.
22301 For VSX and Altivec, we may need a register to convert sp+offset into
22302 reg+sp.
22304 For misaligned 64-bit gpr loads and stores we need a register to
22305 convert an offset address to indirect. */
22307 static reg_class_t
22308 rs6000_secondary_reload (bool in_p,
22309 rtx x,
22310 reg_class_t rclass_i,
22311 machine_mode mode,
22312 secondary_reload_info *sri)
22314 enum reg_class rclass = (enum reg_class) rclass_i;
22315 reg_class_t ret = ALL_REGS;
22316 enum insn_code icode;
22317 bool default_p = false;
22318 bool done_p = false;
22320 /* Allow subreg of memory before/during reload. */
22321 bool memory_p = (MEM_P (x)
22322 || (!reload_completed && GET_CODE (x) == SUBREG
22323 && MEM_P (SUBREG_REG (x))));
22325 sri->icode = CODE_FOR_nothing;
22326 sri->t_icode = CODE_FOR_nothing;
22327 sri->extra_cost = 0;
22328 icode = ((in_p)
22329 ? reg_addr[mode].reload_load
22330 : reg_addr[mode].reload_store);
22332 if (REG_P (x) || register_operand (x, mode))
22334 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
22335 bool altivec_p = (rclass == ALTIVEC_REGS);
22336 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
22338 if (!in_p)
22339 std::swap (to_type, from_type);
22341 /* Can we do a direct move of some sort? */
22342 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
22343 altivec_p))
22345 icode = (enum insn_code)sri->icode;
22346 default_p = false;
22347 done_p = true;
22348 ret = NO_REGS;
22352 /* Make sure 0.0 is not reloaded or forced into memory. */
22353 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
22355 ret = NO_REGS;
22356 default_p = false;
22357 done_p = true;
22360 /* If this is a scalar floating point value and we want to load it into the
22361 traditional Altivec registers, route it through a traditional floating
22362 point register, unless we have D-form addressing. Also make sure that
22363 non-zero constants use an FPR. */
22364 if (!done_p && reg_addr[mode].scalar_in_vmx_p
22365 && !mode_supports_vmx_dform (mode)
22366 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
22367 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
22369 ret = FLOAT_REGS;
22370 default_p = false;
22371 done_p = true;
22374 /* Handle reload of load/stores if we have reload helper functions. */
22375 if (!done_p && icode != CODE_FOR_nothing && memory_p)
22377 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
22378 mode);
22380 if (extra_cost >= 0)
22382 done_p = true;
22383 ret = NO_REGS;
22384 if (extra_cost > 0)
22386 sri->extra_cost = extra_cost;
22387 sri->icode = icode;
22392 /* Handle unaligned loads and stores of integer registers. */
22393 if (!done_p && TARGET_POWERPC64
22394 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
22395 && memory_p
22396 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
22398 rtx addr = XEXP (x, 0);
22399 rtx off = address_offset (addr);
22401 if (off != NULL_RTX)
22403 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
22404 unsigned HOST_WIDE_INT offset = INTVAL (off);
22406 /* We need a secondary reload when our legitimate_address_p
22407 says the address is good (as otherwise the entire address
22408 will be reloaded), and the offset is not a multiple of
22409 four or we have an address wrap. Address wrap will only
22410 occur for LO_SUMs since legitimate_offset_address_p
22411 rejects addresses for 16-byte mems that will wrap. */
22412 if (GET_CODE (addr) == LO_SUM
22413 ? (1 /* legitimate_address_p allows any offset for lo_sum */
22414 && ((offset & 3) != 0
22415 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
22416 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
22417 && (offset & 3) != 0))
22419 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
22420 if (in_p)
22421 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
22422 : CODE_FOR_reload_di_load);
22423 else
22424 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
22425 : CODE_FOR_reload_di_store);
22426 sri->extra_cost = 2;
22427 ret = NO_REGS;
22428 done_p = true;
22430 else
22431 default_p = true;
22433 else
22434 default_p = true;
22437 if (!done_p && !TARGET_POWERPC64
22438 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
22439 && memory_p
22440 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
22442 rtx addr = XEXP (x, 0);
22443 rtx off = address_offset (addr);
22445 if (off != NULL_RTX)
22447 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
22448 unsigned HOST_WIDE_INT offset = INTVAL (off);
22450 /* We need a secondary reload when our legitimate_address_p
22451 says the address is good (as otherwise the entire address
22452 will be reloaded), and we have a wrap.
22454 legitimate_lo_sum_address_p allows LO_SUM addresses to
22455 have any offset so test for wrap in the low 16 bits.
22457 legitimate_offset_address_p checks for the range
22458 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
22459 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
22460 [0x7ff4,0x7fff] respectively, so test for the
22461 intersection of these ranges, [0x7ffc,0x7fff] and
22462 [0x7ff4,0x7ff7] respectively.
22464 Note that the address we see here may have been
22465 manipulated by legitimize_reload_address. */
22466 if (GET_CODE (addr) == LO_SUM
22467 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
22468 : offset - (0x8000 - extra) < UNITS_PER_WORD)
22470 if (in_p)
22471 sri->icode = CODE_FOR_reload_si_load;
22472 else
22473 sri->icode = CODE_FOR_reload_si_store;
22474 sri->extra_cost = 2;
22475 ret = NO_REGS;
22476 done_p = true;
22478 else
22479 default_p = true;
22481 else
22482 default_p = true;
22485 if (!done_p)
22486 default_p = true;
22488 if (default_p)
22489 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
22491 gcc_assert (ret != ALL_REGS);
22493 if (TARGET_DEBUG_ADDR)
22495 fprintf (stderr,
22496 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
22497 "mode = %s",
22498 reg_class_names[ret],
22499 in_p ? "true" : "false",
22500 reg_class_names[rclass],
22501 GET_MODE_NAME (mode));
22503 if (reload_completed)
22504 fputs (", after reload", stderr);
22506 if (!done_p)
22507 fputs (", done_p not set", stderr);
22509 if (default_p)
22510 fputs (", default secondary reload", stderr);
22512 if (sri->icode != CODE_FOR_nothing)
22513 fprintf (stderr, ", reload func = %s, extra cost = %d",
22514 insn_data[sri->icode].name, sri->extra_cost);
22516 else if (sri->extra_cost > 0)
22517 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
22519 fputs ("\n", stderr);
22520 debug_rtx (x);
22523 return ret;
22526 /* Better tracing for rs6000_secondary_reload_inner. */
22528 static void
22529 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
22530 bool store_p)
22532 rtx set, clobber;
22534 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
22536 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
22537 store_p ? "store" : "load");
22539 if (store_p)
22540 set = gen_rtx_SET (mem, reg);
22541 else
22542 set = gen_rtx_SET (reg, mem);
22544 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
22545 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
22548 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
22549 ATTRIBUTE_NORETURN;
22551 static void
22552 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
22553 bool store_p)
22555 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
22556 gcc_unreachable ();
22559 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
22560 reload helper functions. These were identified in
22561 rs6000_secondary_reload_memory, and if reload decided to use the secondary
22562 reload, it calls the insns:
22563 reload_<RELOAD:mode>_<P:mptrsize>_store
22564 reload_<RELOAD:mode>_<P:mptrsize>_load
22566 which in turn calls this function, to do whatever is necessary to create
22567 valid addresses. */
22569 void
22570 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
22572 int regno = true_regnum (reg);
22573 machine_mode mode = GET_MODE (reg);
22574 addr_mask_type addr_mask;
22575 rtx addr;
22576 rtx new_addr;
22577 rtx op_reg, op0, op1;
22578 rtx and_op;
22579 rtx cc_clobber;
22580 rtvec rv;
22582 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
22583 || !base_reg_operand (scratch, GET_MODE (scratch)))
22584 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22586 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
22587 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
22589 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
22590 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
22592 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
22593 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
22595 else
22596 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22598 /* Make sure the mode is valid in this register class. */
22599 if ((addr_mask & RELOAD_REG_VALID) == 0)
22600 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22602 if (TARGET_DEBUG_ADDR)
22603 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
22605 new_addr = addr = XEXP (mem, 0);
22606 switch (GET_CODE (addr))
22608 /* Does the register class support auto update forms for this mode? If
22609 not, do the update now. We don't need a scratch register, since the
22610 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
22611 case PRE_INC:
22612 case PRE_DEC:
22613 op_reg = XEXP (addr, 0);
22614 if (!base_reg_operand (op_reg, Pmode))
22615 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22617 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
22619 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
22620 new_addr = op_reg;
22622 break;
22624 case PRE_MODIFY:
22625 op0 = XEXP (addr, 0);
22626 op1 = XEXP (addr, 1);
22627 if (!base_reg_operand (op0, Pmode)
22628 || GET_CODE (op1) != PLUS
22629 || !rtx_equal_p (op0, XEXP (op1, 0)))
22630 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22632 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
22634 emit_insn (gen_rtx_SET (op0, op1));
22635 new_addr = reg;
22637 break;
22639 /* Do we need to simulate AND -16 to clear the bottom address bits used
22640 in VMX load/stores? */
22641 case AND:
22642 op0 = XEXP (addr, 0);
22643 op1 = XEXP (addr, 1);
22644 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
22646 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
22647 op_reg = op0;
21649 else if (GET_CODE (op0) == PLUS)
21651 emit_insn (gen_rtx_SET (scratch, op0));
22652 op_reg = scratch;
22655 else
22656 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22658 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
22659 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
22660 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
22661 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
22662 new_addr = scratch;
22664 break;
22666 /* If this is an indirect address, make sure it is a base register. */
22667 case REG:
22668 case SUBREG:
22669 if (!base_reg_operand (addr, GET_MODE (addr)))
22671 emit_insn (gen_rtx_SET (scratch, addr));
22672 new_addr = scratch;
22674 break;
22676 /* If this is an indexed address, make sure the register class can handle
22677 indexed addresses for this mode. */
22678 case PLUS:
22679 op0 = XEXP (addr, 0);
22680 op1 = XEXP (addr, 1);
22681 if (!base_reg_operand (op0, Pmode))
22682 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22684 else if (int_reg_operand (op1, Pmode))
22686 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
22688 emit_insn (gen_rtx_SET (scratch, addr));
22689 new_addr = scratch;
22693 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
22695 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
22696 || !quad_address_p (addr, mode, false))
22698 emit_insn (gen_rtx_SET (scratch, addr));
22699 new_addr = scratch;
22703 /* Make sure the register class can handle offset addresses. */
22704 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
22706 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22708 emit_insn (gen_rtx_SET (scratch, addr));
22709 new_addr = scratch;
22713 else
22714 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22716 break;
22718 case LO_SUM:
22719 op0 = XEXP (addr, 0);
22720 op1 = XEXP (addr, 1);
22721 if (!base_reg_operand (op0, Pmode))
22722 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22724 else if (int_reg_operand (op1, Pmode))
22726 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
22728 emit_insn (gen_rtx_SET (scratch, addr));
22729 new_addr = scratch;
22733 /* Quad offsets are restricted and can't handle normal addresses. */
22734 else if (mode_supports_vsx_dform_quad (mode))
22736 emit_insn (gen_rtx_SET (scratch, addr));
22737 new_addr = scratch;
22740 /* Make sure the register class can handle offset addresses. */
22741 else if (legitimate_lo_sum_address_p (mode, addr, false))
22743 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
22745 emit_insn (gen_rtx_SET (scratch, addr));
22746 new_addr = scratch;
22750 else
22751 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22753 break;
22755 case SYMBOL_REF:
22756 case CONST:
22757 case LABEL_REF:
22758 rs6000_emit_move (scratch, addr, Pmode);
22759 new_addr = scratch;
22760 break;
22762 default:
22763 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
22766 /* Adjust the address if it changed. */
22767 if (addr != new_addr)
22769 mem = replace_equiv_address_nv (mem, new_addr);
22770 if (TARGET_DEBUG_ADDR)
22771 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
22774 /* Now create the move. */
22775 if (store_p)
22776 emit_insn (gen_rtx_SET (mem, reg));
22777 else
22778 emit_insn (gen_rtx_SET (reg, mem));
22780 return;
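/* Example (illustrative, register numbers hypothetical): reloading an
   Altivec-style address (and (reg r9) (const_int -16)) for a register
   class without the AND facility takes the AND case above: it emits
   scratch = r9 & -16 (the AND with a CC-register clobber built there)
   and performs the memory access through (mem scratch).  */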
22783 /* Convert reloads involving 64-bit gprs and misaligned offset
22784 addressing, or multiple 32-bit gprs and offsets that are too large,
22785 to use indirect addressing. */
22787 void
22788 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
22790 int regno = true_regnum (reg);
22791 enum reg_class rclass;
22792 rtx addr;
22793 rtx scratch_or_premodify = scratch;
22795 if (TARGET_DEBUG_ADDR)
22797 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
22798 store_p ? "store" : "load");
22799 fprintf (stderr, "reg:\n");
22800 debug_rtx (reg);
22801 fprintf (stderr, "mem:\n");
22802 debug_rtx (mem);
22803 fprintf (stderr, "scratch:\n");
22804 debug_rtx (scratch);
22807 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
22808 gcc_assert (GET_CODE (mem) == MEM);
22809 rclass = REGNO_REG_CLASS (regno);
22810 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
22811 addr = XEXP (mem, 0);
22813 if (GET_CODE (addr) == PRE_MODIFY)
22815 gcc_assert (REG_P (XEXP (addr, 0))
22816 && GET_CODE (XEXP (addr, 1)) == PLUS
22817 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
22818 scratch_or_premodify = XEXP (addr, 0);
22819 if (!HARD_REGISTER_P (scratch_or_premodify))
22820 /* If we have a pseudo here then reload will have arranged
22821 to have it replaced, but only in the original insn.
22822 Use the replacement here too. */
22823 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
22825 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
22826 expressions from the original insn, without unsharing them.
22827 Any RTL that points into the original insn will of course
22828 have register replacements applied. That is why we don't
22829 need to look for replacements under the PLUS. */
22830 addr = XEXP (addr, 1);
22832 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
22834 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
22836 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
22838 /* Now create the move. */
22839 if (store_p)
22840 emit_insn (gen_rtx_SET (mem, reg));
22841 else
22842 emit_insn (gen_rtx_SET (reg, mem));
22844 return;
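/* Example (illustrative): a misaligned access such as
   (mem:DI (plus (reg r1) (const_int 6))) cannot use "ld", whose
   DS-form offset must be a multiple of 4; the code above copies the
   whole address into the scratch ("addi scratch,r1,6") and performs
   the move through (mem:DI scratch) instead.  */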
22847 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
22848 this function has any SDmode references. If we are on a power7 or later, we
22849 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
22850 can load/store the value. */
22852 static void
22853 rs6000_alloc_sdmode_stack_slot (void)
22855 tree t;
22856 basic_block bb;
22857 gimple_stmt_iterator gsi;
22859 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
22860 /* We use a different approach for dealing with the secondary
22861 memory in LRA. */
22862 if (ira_use_lra_p)
22863 return;
22865 if (TARGET_NO_SDMODE_STACK)
22866 return;
22868 FOR_EACH_BB_FN (bb, cfun)
22869 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
22871 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
22872 if (ret)
22874 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22875 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22876 SDmode, 0);
22877 return;
22881 /* Check for any SDmode parameters of the function. */
22882 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
22884 if (TREE_TYPE (t) == error_mark_node)
22885 continue;
22887 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
22888 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
22890 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22891 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22892 SDmode, 0);
22893 return;
22898 static void
22899 rs6000_instantiate_decls (void)
22901 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
22902 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
22905 /* Given an rtx X being reloaded into a reg required to be
22906 in class CLASS, return the class of reg to actually use.
22907 In general this is just CLASS; but on some machines
22908 in some cases it is preferable to use a more restrictive class.
22910 On the RS/6000, we have to return NO_REGS when we want to reload a
22911 floating-point CONST_DOUBLE to force it to be copied to memory.
22913 We also don't want to reload integer values into floating-point
22914 registers if we can at all help it. In fact, this can
22915 cause reload to die, if it tries to generate a reload of CTR
22916 into a FP register and discovers it doesn't have the memory location
22917 required.
22919 ??? Would it be a good idea to have reload do the converse, that is
22920 try to reload floating modes into FP registers if possible?
22923 static enum reg_class
22924 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
22926 machine_mode mode = GET_MODE (x);
22927 bool is_constant = CONSTANT_P (x);
22929 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
22930 reload class for it. */
22931 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22932 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
22933 return NO_REGS;
22935 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
22936 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
22937 return NO_REGS;
22939 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
22940 the reloading of address expressions using PLUS into floating point
22941 registers. */
22942 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
22944 if (is_constant)
22946 /* Zero is always allowed in all VSX registers. */
22947 if (x == CONST0_RTX (mode))
22948 return rclass;
22950 /* If this is a vector constant that can be formed with a few Altivec
22951 instructions, we want altivec registers. */
22952 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
22953 return ALTIVEC_REGS;
22955 /* If this is an integer constant that can easily be loaded into
22956 vector registers, allow it. */
22957 if (CONST_INT_P (x))
22959 HOST_WIDE_INT value = INTVAL (x);
22961 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
22962 2.06 can generate it in the Altivec registers with
22963 VSPLTI<x>. */
22964 if (value == -1)
22966 if (TARGET_P8_VECTOR)
22967 return rclass;
22968 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22969 return ALTIVEC_REGS;
22970 else
22971 return NO_REGS;
22974 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
22975 a sign extend in the Altivec registers. */
22976 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
22977 && TARGET_VSX_SMALL_INTEGER
22978 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
22979 return ALTIVEC_REGS;
22982 /* Force constant to memory. */
22983 return NO_REGS;
22986 /* D-form addressing can easily reload the value. */
22987 if (mode_supports_vmx_dform (mode)
22988 || mode_supports_vsx_dform_quad (mode))
22989 return rclass;
22991 /* If this is a scalar floating point value and we don't have D-form
22992 addressing, prefer the traditional floating point registers so that we
22993 can use D-form (register+offset) addressing. */
22994 if (rclass == VSX_REGS
22995 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
22996 return FLOAT_REGS;
22998 /* Prefer the Altivec registers if Altivec is handling the vector
22999 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
23000 loads. */
23001 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
23002 || mode == V1TImode)
23003 return ALTIVEC_REGS;
23005 return rclass;
23008 if (is_constant || GET_CODE (x) == PLUS)
23010 if (reg_class_subset_p (GENERAL_REGS, rclass))
23011 return GENERAL_REGS;
23012 if (reg_class_subset_p (BASE_REGS, rclass))
23013 return BASE_REGS;
23014 return NO_REGS;
23017 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
23018 return GENERAL_REGS;
23020 return rclass;
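/* Example (illustrative): reloading the SFmode constant 1.0 into
   VSX_REGS returns NO_REGS above, forcing the constant to memory,
   while reloading an SFmode register value without D-form vector
   addressing prefers FLOAT_REGS so that reg+offset addressing remains
   available.  */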
23023 /* Debug version of rs6000_preferred_reload_class. */
23024 static enum reg_class
23025 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
23027 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
23029 fprintf (stderr,
23030 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
23031 "mode = %s, x:\n",
23032 reg_class_names[ret], reg_class_names[rclass],
23033 GET_MODE_NAME (GET_MODE (x)));
23034 debug_rtx (x);
23036 return ret;
23039 /* If we are copying between FP or AltiVec registers and anything else, we need
23040 a memory location. The exception is when we are targeting ppc64 and the
23041 move to/from fpr to gpr instructions are available. Also, under VSX, you
23042 can copy vector registers from the FP register set to the Altivec register
23043 set and vice versa. */
23045 static bool
23046 rs6000_secondary_memory_needed (enum reg_class from_class,
23047 enum reg_class to_class,
23048 machine_mode mode)
23050 enum rs6000_reg_type from_type, to_type;
23051 bool altivec_p = ((from_class == ALTIVEC_REGS)
23052 || (to_class == ALTIVEC_REGS));
23054 /* If a simple/direct move is available, we don't need secondary memory. */
23055 from_type = reg_class_to_reg_type[(int)from_class];
23056 to_type = reg_class_to_reg_type[(int)to_class];
23058 if (rs6000_secondary_reload_move (to_type, from_type, mode,
23059 (secondary_reload_info *)0, altivec_p))
23060 return false;
23062 /* If we have a floating point or vector register class, we need to use
23063 memory to transfer the data. */
23064 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
23065 return true;
23067 return false;
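/* Example (illustrative): without direct-move support, copying an
   SImode value between GENERAL_REGS and FLOAT_REGS returns true here,
   so reload routes the value through a stack slot; a GPR-to-GPR copy
   of the same value is a simple move and returns false.  */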
23070 /* Debug version of rs6000_secondary_memory_needed. */
23071 static bool
23072 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
23073 enum reg_class to_class,
23074 machine_mode mode)
23076 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
23078 fprintf (stderr,
23079 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
23080 "to_class = %s, mode = %s\n",
23081 ret ? "true" : "false",
23082 reg_class_names[from_class],
23083 reg_class_names[to_class],
23084 GET_MODE_NAME (mode));
23086 return ret;
23089 /* Return the register class of a scratch register needed to copy IN into
23090 or out of a register in RCLASS in MODE. If it can be done directly,
23091 NO_REGS is returned. */
23093 static enum reg_class
23094 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
23095 rtx in)
23097 int regno;
23099 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
23100 #if TARGET_MACHO
23101 && MACHOPIC_INDIRECT
23102 #endif
23105 /* We cannot copy a symbolic operand directly into anything
23106 other than BASE_REGS for TARGET_ELF. So indicate that a
23107 register from BASE_REGS is needed as an intermediate
23108 register.
23110 On Darwin, pic addresses require a load from memory, which
23111 needs a base register. */
23112 if (rclass != BASE_REGS
23113 && (GET_CODE (in) == SYMBOL_REF
23114 || GET_CODE (in) == HIGH
23115 || GET_CODE (in) == LABEL_REF
23116 || GET_CODE (in) == CONST))
23117 return BASE_REGS;
23120 if (GET_CODE (in) == REG)
23122 regno = REGNO (in);
23123 if (regno >= FIRST_PSEUDO_REGISTER)
23125 regno = true_regnum (in);
23126 if (regno >= FIRST_PSEUDO_REGISTER)
23127 regno = -1;
23130 else if (GET_CODE (in) == SUBREG)
23132 regno = true_regnum (in);
23133 if (regno >= FIRST_PSEUDO_REGISTER)
23134 regno = -1;
23136 else
23137 regno = -1;
23139 /* If we have VSX register moves, prefer moving scalar values between
23140 Altivec registers and GPRs by going via an FPR (and then via memory)
23141 instead of reloading the secondary memory address for Altivec moves. */
23142 if (TARGET_VSX
23143 && GET_MODE_SIZE (mode) < 16
23144 && !mode_supports_vmx_dform (mode)
23145 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
23146 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
23147 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
23148 && (regno >= 0 && INT_REGNO_P (regno)))))
23149 return FLOAT_REGS;
23151 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
23152 into anything. */
23153 if (rclass == GENERAL_REGS || rclass == BASE_REGS
23154 || (regno >= 0 && INT_REGNO_P (regno)))
23155 return NO_REGS;
23157 /* Constants, memory, and VSX registers can go into VSX registers (both the
23158 traditional floating point and the altivec registers). */
23159 if (rclass == VSX_REGS
23160 && (regno == -1 || VSX_REGNO_P (regno)))
23161 return NO_REGS;
23163 /* Constants, memory, and FP registers can go into FP registers. */
23164 if ((regno == -1 || FP_REGNO_P (regno))
23165 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
23166 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
23168 /* Memory, and AltiVec registers can go into AltiVec registers. */
23169 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
23170 && rclass == ALTIVEC_REGS)
23171 return NO_REGS;
23173 /* We can copy among the CR registers. */
23174 if ((rclass == CR_REGS || rclass == CR0_REGS)
23175 && regno >= 0 && CR_REGNO_P (regno))
23176 return NO_REGS;
23178 /* Otherwise, we need GENERAL_REGS. */
23179 return GENERAL_REGS;
23182 /* Debug version of rs6000_secondary_reload_class. */
23183 static enum reg_class
23184 rs6000_debug_secondary_reload_class (enum reg_class rclass,
23185 machine_mode mode, rtx in)
23187 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
23188 fprintf (stderr,
23189 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
23190 "mode = %s, input rtx:\n",
23191 reg_class_names[ret], reg_class_names[rclass],
23192 GET_MODE_NAME (mode));
23193 debug_rtx (in);
23195 return ret;
23198 /* Return true if a mode change from FROM to TO is invalid for register class RCLASS.  */
23200 static bool
23201 rs6000_cannot_change_mode_class (machine_mode from,
23202 machine_mode to,
23203 enum reg_class rclass)
23205 unsigned from_size = GET_MODE_SIZE (from);
23206 unsigned to_size = GET_MODE_SIZE (to);
23208 if (from_size != to_size)
23210 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
23212 if (reg_classes_intersect_p (xclass, rclass))
23214 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
23215 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
23216 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
23217 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
23219 /* Don't allow 64-bit types to overlap with 128-bit types that take a
23220 single register under VSX because the scalar part of the register
23221 is in the upper 64-bits, and not the lower 64-bits. Types like
23222 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
23223 IEEE floating point can't overlap, and neither can small
23224 values. */
23226 if (to_float128_vector_p && from_float128_vector_p)
23227 return false;
23229 else if (to_float128_vector_p || from_float128_vector_p)
23230 return true;
23232 /* TDmode in floating-mode registers must always go into a register
23233 pair with the most significant word in the even-numbered register
23234 to match ISA requirements. In little-endian mode, this does not
23235 match subreg numbering, so we cannot allow subregs. */
23236 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
23237 return true;
23239 if (from_size < 8 || to_size < 8)
23240 return true;
23242 if (from_size == 8 && (8 * to_nregs) != to_size)
23243 return true;
23245 if (to_size == 8 && (8 * from_nregs) != from_size)
23246 return true;
23248 return false;
23250 else
23251 return false;
23254 if (TARGET_E500_DOUBLE
23255 && ((((to) == DFmode) + ((from) == DFmode)) == 1
23256 || (((to) == TFmode) + ((from) == TFmode)) == 1
23257 || (((to) == IFmode) + ((from) == IFmode)) == 1
23258 || (((to) == KFmode) + ((from) == KFmode)) == 1
23259 || (((to) == DDmode) + ((from) == DDmode)) == 1
23260 || (((to) == TDmode) + ((from) == TDmode)) == 1
23261 || (((to) == DImode) + ((from) == DImode)) == 1))
23262 return true;
23264 /* Since the VSX register set includes traditional floating point registers
23265 and altivec registers, just check for the size being different instead of
23266 trying to check whether the modes are vector modes. Otherwise it won't
23267 allow say DF and DI to change classes. For types like TFmode and TDmode
23268 that take 2 64-bit registers, rather than a single 128-bit register, don't
23269 allow subregs of those types to other 128 bit types. */
23270 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
23272 unsigned num_regs = (from_size + 15) / 16;
23273 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
23274 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
23275 return true;
23277 return (from_size != 8 && from_size != 16);
23280 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
23281 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
23282 return true;
23284 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
23285 && reg_classes_intersect_p (GENERAL_REGS, rclass))
23286 return true;
23288 return false;
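/* Standalone sketch of the VSX size rule above (not part of this port;
   sketch_* names are hypothetical, block compiled out with #if 0).  The
   register counts stand in for hard_regno_nregs: a mode change is
   rejected unless the FROM size is 8 or 16 bytes and neither mode needs
   more registers than one per 16 bytes of FROM.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
sketch_vsx_cannot_change (unsigned from_size, unsigned from_nregs,
			  unsigned to_nregs)
{
  unsigned num_regs = (from_size + 15) / 16;
  if (to_nregs > num_regs || from_nregs > num_regs)
    return true;
  return from_size != 8 && from_size != 16;
}

int
main (void)
{
  /* DFmode (8 bytes, 1 FPR) <-> V2DImode (16 bytes, 1 VSR): allowed.  */
  printf ("%d\n", sketch_vsx_cannot_change (8, 1, 1));	/* 0 */
  /* TFmode (16 bytes, 2 FPRs) <-> V2DImode (16 bytes, 1 VSR): the
     two-register IBM format may not be punned to one vector reg.  */
  printf ("%d\n", sketch_vsx_cannot_change (16, 2, 1));	/* 1 */
  return 0;
}
#endif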
23291 /* Debug version of rs6000_cannot_change_mode_class. */
23292 static bool
23293 rs6000_debug_cannot_change_mode_class (machine_mode from,
23294 machine_mode to,
23295 enum reg_class rclass)
23297 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
23299 fprintf (stderr,
23300 "rs6000_cannot_change_mode_class, return %s, from = %s, "
23301 "to = %s, rclass = %s\n",
23302 ret ? "true" : "false",
23303 GET_MODE_NAME (from), GET_MODE_NAME (to),
23304 reg_class_names[rclass]);
23306 return ret;
23309 /* Return a string to do a move operation of 128 bits of data. */
23311 const char *
23312 rs6000_output_move_128bit (rtx operands[])
23314 rtx dest = operands[0];
23315 rtx src = operands[1];
23316 machine_mode mode = GET_MODE (dest);
23317 int dest_regno;
23318 int src_regno;
23319 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
23320 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
23322 if (REG_P (dest))
23324 dest_regno = REGNO (dest);
23325 dest_gpr_p = INT_REGNO_P (dest_regno);
23326 dest_fp_p = FP_REGNO_P (dest_regno);
23327 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
23328 dest_vsx_p = dest_fp_p | dest_vmx_p;
23330 else
23332 dest_regno = -1;
23333 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
23336 if (REG_P (src))
23338 src_regno = REGNO (src);
23339 src_gpr_p = INT_REGNO_P (src_regno);
23340 src_fp_p = FP_REGNO_P (src_regno);
23341 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
23342 src_vsx_p = src_fp_p | src_vmx_p;
23344 else
23346 src_regno = -1;
23347 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
23350 /* Register moves. */
23351 if (dest_regno >= 0 && src_regno >= 0)
23353 if (dest_gpr_p)
23355 if (src_gpr_p)
23356 return "#";
23358 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
23359 return (WORDS_BIG_ENDIAN
23360 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
23361 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
23363 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
23364 return "#";
23367 else if (TARGET_VSX && dest_vsx_p)
23369 if (src_vsx_p)
23370 return "xxlor %x0,%x1,%x1";
23372 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
23373 return (WORDS_BIG_ENDIAN
23374 ? "mtvsrdd %x0,%1,%L1"
23375 : "mtvsrdd %x0,%L1,%1");
23377 else if (TARGET_DIRECT_MOVE && src_gpr_p)
23378 return "#";
23381 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
23382 return "vor %0,%1,%1";
23384 else if (dest_fp_p && src_fp_p)
23385 return "#";
23388 /* Loads. */
23389 else if (dest_regno >= 0 && MEM_P (src))
23391 if (dest_gpr_p)
23393 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
23394 return "lq %0,%1";
23395 else
23396 return "#";
23399 else if (TARGET_ALTIVEC && dest_vmx_p
23400 && altivec_indexed_or_indirect_operand (src, mode))
23401 return "lvx %0,%y1";
23403 else if (TARGET_VSX && dest_vsx_p)
23405 if (mode_supports_vsx_dform_quad (mode)
23406 && quad_address_p (XEXP (src, 0), mode, true))
23407 return "lxv %x0,%1";
23409 else if (TARGET_P9_VECTOR)
23410 return "lxvx %x0,%y1";
23412 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
23413 return "lxvw4x %x0,%y1";
23415 else
23416 return "lxvd2x %x0,%y1";
23419 else if (TARGET_ALTIVEC && dest_vmx_p)
23420 return "lvx %0,%y1";
23422 else if (dest_fp_p)
23423 return "#";
23426 /* Stores. */
23427 else if (src_regno >= 0 && MEM_P (dest))
23429 if (src_gpr_p)
23431 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
23432 return "stq %1,%0";
23433 else
23434 return "#";
23437 else if (TARGET_ALTIVEC && src_vmx_p
23438 && altivec_indexed_or_indirect_operand (src, mode))
23439 return "stvx %1,%y0";
23441 else if (TARGET_VSX && src_vsx_p)
23443 if (mode_supports_vsx_dform_quad (mode)
23444 && quad_address_p (XEXP (dest, 0), mode, true))
23445 return "stxv %x1,%0";
23447 else if (TARGET_P9_VECTOR)
23448 return "stxvx %x1,%y0";
23450 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
23451 return "stxvw4x %x1,%y0";
23453 else
23454 return "stxvd2x %x1,%y0";
23457 else if (TARGET_ALTIVEC && src_vmx_p)
23458 return "stvx %1,%y0";
23460 else if (src_fp_p)
23461 return "#";
23464 /* Constants. */
23465 else if (dest_regno >= 0
23466 && (GET_CODE (src) == CONST_INT
23467 || GET_CODE (src) == CONST_WIDE_INT
23468 || GET_CODE (src) == CONST_DOUBLE
23469 || GET_CODE (src) == CONST_VECTOR))
23471 if (dest_gpr_p)
23472 return "#";
23474 else if ((dest_vmx_p && TARGET_ALTIVEC)
23475 || (dest_vsx_p && TARGET_VSX))
23476 return output_vec_const_move (operands);
23479 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
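/* Standalone sketch of the register-to-register arm of the selector
   above (not part of this port; sketch names are hypothetical, compiled
   out with #if 0).  "#" means the insn is split later, as tested by
   rs6000_split_128bit_ok_p below.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static const char *
sketch_move128_reg (bool dest_gpr, bool dest_vsx, bool src_gpr,
		    bool src_vsx, bool direct_move_128)
{
  if (dest_gpr && src_gpr)
    return "#";				/* split into GPR-pair moves */
  if (dest_vsx && src_vsx)
    return "xxlor %x0,%x1,%x1";		/* one VSX logical-or copy */
  if (direct_move_128 && dest_gpr && src_vsx)
    return "mfvsrd+mfvsrld";		/* ISA 3.0 two-insn direct move */
  if (direct_move_128 && dest_vsx && src_gpr)
    return "mtvsrdd";
  return "#";				/* otherwise split or bounce */
}

int
main (void)
{
  printf ("%s\n", sketch_move128_reg (false, true, false, true, false));
  printf ("%s\n", sketch_move128_reg (true, false, false, true, true));
  return 0;
}
#endif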
23482 /* Validate a 128-bit move. */
23483 bool
23484 rs6000_move_128bit_ok_p (rtx operands[])
23486 machine_mode mode = GET_MODE (operands[0]);
23487 return (gpc_reg_operand (operands[0], mode)
23488 || gpc_reg_operand (operands[1], mode));
23491 /* Return true if a 128-bit move needs to be split. */
23492 bool
23493 rs6000_split_128bit_ok_p (rtx operands[])
23495 if (!reload_completed)
23496 return false;
23498 if (!gpr_or_gpr_p (operands[0], operands[1]))
23499 return false;
23501 if (quad_load_store_p (operands[0], operands[1]))
23502 return false;
23504 return true;
23508 /* Given a comparison operation, return the bit number in CCR to test. We
23509 know this is a valid comparison.
23511 SCC_P is 1 if this is for an scc. That means that %D will have been
23512 used instead of %C, so the bits will be in different places.
23514 Return -1 if OP isn't a valid comparison for some reason. */
23516 int
23517 ccr_bit (rtx op, int scc_p)
23519 enum rtx_code code = GET_CODE (op);
23520 machine_mode cc_mode;
23521 int cc_regnum;
23522 int base_bit;
23523 rtx reg;
23525 if (!COMPARISON_P (op))
23526 return -1;
23528 reg = XEXP (op, 0);
23530 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
23532 cc_mode = GET_MODE (reg);
23533 cc_regnum = REGNO (reg);
23534 base_bit = 4 * (cc_regnum - CR0_REGNO);
23536 validate_condition_mode (code, cc_mode);
23538 /* When generating a sCOND operation, only positive conditions are
23539 allowed. */
23540 gcc_assert (!scc_p
23541 || code == EQ || code == GT || code == LT || code == UNORDERED
23542 || code == GTU || code == LTU);
23544 switch (code)
23546 case NE:
23547 return scc_p ? base_bit + 3 : base_bit + 2;
23548 case EQ:
23549 return base_bit + 2;
23550 case GT: case GTU: case UNLE:
23551 return base_bit + 1;
23552 case LT: case LTU: case UNGE:
23553 return base_bit;
23554 case ORDERED: case UNORDERED:
23555 return base_bit + 3;
23557 case GE: case GEU:
23558 /* If scc, we will have done a cror to put the bit in the
23559 unordered position. So test that bit. For integer, this is ! LT
23560 unless this is an scc insn. */
23561 return scc_p ? base_bit + 3 : base_bit;
23563 case LE: case LEU:
23564 return scc_p ? base_bit + 3 : base_bit + 1;
23566 default:
23567 gcc_unreachable ();
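/* Standalone sketch of the CR numbering used above (not part of this
   port; compiled out with #if 0).  Each CR field holds four bits, so
   bit N of field CRn is 4 * n + N, with LT = 0, GT = 1, EQ = 2 and
   SO/UN = 3; base_bit above is exactly 4 * (cc_regnum - CR0_REGNO).  */
#if 0
#include <stdio.h>

static int
sketch_ccr_bit (int cr_field, int bit_within_field)
{
  return 4 * cr_field + bit_within_field;
}

int
main (void)
{
  printf ("CR0 EQ -> bit %d\n", sketch_ccr_bit (0, 2));	/* 2 */
  printf ("CR1 GT -> bit %d\n", sketch_ccr_bit (1, 1));	/* 5 */
  printf ("CR7 UN -> bit %d\n", sketch_ccr_bit (7, 3));	/* 31 */
  return 0;
}
#endif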
23571 /* Return the GOT register. */
23573 rtx
23574 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
23576 /* The second flow pass currently (June 1999) can't update
23577 regs_ever_live without disturbing other parts of the compiler, so
23578 update it here to make the prolog/epilogue code happy. */
23579 if (!can_create_pseudo_p ()
23580 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23581 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
23583 crtl->uses_pic_offset_table = 1;
23585 return pic_offset_table_rtx;
23588 static rs6000_stack_t stack_info;
23590 /* Function to init struct machine_function.
23591 This will be called, via a pointer variable,
23592 from push_function_context. */
23594 static struct machine_function *
23595 rs6000_init_machine_status (void)
23597 stack_info.reload_completed = 0;
23598 return ggc_cleared_alloc<machine_function> ();
23601 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
23603 /* Write out a function code label. */
23605 void
23606 rs6000_output_function_entry (FILE *file, const char *fname)
23608 if (fname[0] != '.')
23610 switch (DEFAULT_ABI)
23612 default:
23613 gcc_unreachable ();
23615 case ABI_AIX:
23616 if (DOT_SYMBOLS)
23617 putc ('.', file);
23618 else
23619 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
23620 break;
23622 case ABI_ELFv2:
23623 case ABI_V4:
23624 case ABI_DARWIN:
23625 break;
23629 RS6000_OUTPUT_BASENAME (file, fname);
23632 /* Print an operand. Recognize special options, documented below. */
23634 #if TARGET_ELF
23635 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
23636 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
23637 #else
23638 #define SMALL_DATA_RELOC "sda21"
23639 #define SMALL_DATA_REG 0
23640 #endif
23642 void
23643 print_operand (FILE *file, rtx x, int code)
23645 int i;
23646 unsigned HOST_WIDE_INT uval;
23648 switch (code)
23650 /* %a is output_address. */
23652 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
23653 output_operand. */
23655 case 'D':
23656 /* Like 'J' but get to the GT bit only. */
23657 gcc_assert (REG_P (x));
23659 /* Bit 1 is GT bit. */
23660 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
23662 /* Add one for shift count in rlinm for scc. */
23663 fprintf (file, "%d", i + 1);
23664 return;
23666 case 'e':
23667 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
23668 if (! INT_P (x))
23670 output_operand_lossage ("invalid %%e value");
23671 return;
23674 uval = INTVAL (x);
23675 if ((uval & 0xffff) == 0 && uval != 0)
23676 putc ('s', file);
23677 return;
23679 case 'E':
23680 /* X is a CR register. Print the number of the EQ bit of the CR */
23681 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23682 output_operand_lossage ("invalid %%E value");
23683 else
23684 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
23685 return;
23687 case 'f':
23688 /* X is a CR register. Print the shift count needed to move it
23689 to the high-order four bits. */
23690 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23691 output_operand_lossage ("invalid %%f value");
23692 else
23693 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
23694 return;
23696 case 'F':
23697 /* Similar, but print the count for the rotate in the opposite
23698 direction. */
23699 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23700 output_operand_lossage ("invalid %%F value");
23701 else
23702 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
23703 return;
23705 case 'G':
23706 /* X is a constant integer. If it is negative, print "m",
23707 otherwise print "z". This is to make an aze or ame insn. */
23708 if (GET_CODE (x) != CONST_INT)
23709 output_operand_lossage ("invalid %%G value");
23710 else if (INTVAL (x) >= 0)
23711 putc ('z', file);
23712 else
23713 putc ('m', file);
23714 return;
23716 case 'h':
23717 /* If constant, output low-order five bits. Otherwise, write
23718 normally. */
23719 if (INT_P (x))
23720 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
23721 else
23722 print_operand (file, x, 0);
23723 return;
23725 case 'H':
23726 /* If constant, output low-order six bits. Otherwise, write
23727 normally. */
23728 if (INT_P (x))
23729 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
23730 else
23731 print_operand (file, x, 0);
23732 return;
23734 case 'I':
23735 /* Print `i' if this is a constant, else nothing. */
23736 if (INT_P (x))
23737 putc ('i', file);
23738 return;
23740 case 'j':
23741 /* Write the bit number in CCR for jump. */
23742 i = ccr_bit (x, 0);
23743 if (i == -1)
23744 output_operand_lossage ("invalid %%j code");
23745 else
23746 fprintf (file, "%d", i);
23747 return;
23749 case 'J':
23750 /* Similar, but add one for shift count in rlinm for scc and pass
23751 scc flag to `ccr_bit'. */
23752 i = ccr_bit (x, 1);
23753 if (i == -1)
23754 output_operand_lossage ("invalid %%J code");
23755 else
23756 /* If we want bit 31, write a shift count of zero, not 32. */
23757 fprintf (file, "%d", i == 31 ? 0 : i + 1);
23758 return;
23760 case 'k':
23761 /* X must be a constant. Write the 1's complement of the
23762 constant. */
23763 if (! INT_P (x))
23764 output_operand_lossage ("invalid %%k value");
23765 else
23766 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
23767 return;
23769 case 'K':
23770 /* X must be a symbolic constant on ELF. Write an
23771 expression suitable for an 'addi' that adds in the low 16
23772 bits of the MEM. */
23773 if (GET_CODE (x) == CONST)
23775 if (GET_CODE (XEXP (x, 0)) != PLUS
23776 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
23777 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
23778 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
23779 output_operand_lossage ("invalid %%K value");
23781 print_operand_address (file, x);
23782 fputs ("@l", file);
23783 return;
23785 /* %l is output_asm_label. */
23787 case 'L':
23788 /* Write second word of DImode or DFmode reference. Works on register
23789 or non-indexed memory only. */
23790 if (REG_P (x))
23791 fputs (reg_names[REGNO (x) + 1], file);
23792 else if (MEM_P (x))
23794 machine_mode mode = GET_MODE (x);
23795 /* Handle possible auto-increment. Since it is pre-increment and
23796 we have already done it, we can just use an offset of word. */
23797 if (GET_CODE (XEXP (x, 0)) == PRE_INC
23798 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
23799 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23800 UNITS_PER_WORD));
23801 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23802 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23803 UNITS_PER_WORD));
23804 else
23805 output_address (mode, XEXP (adjust_address_nv (x, SImode,
23806 UNITS_PER_WORD),
23807 0));
23809 if (small_data_operand (x, GET_MODE (x)))
23810 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23811 reg_names[SMALL_DATA_REG]);
23813 return;
23815 case 'N':
23816 /* Write the number of elements in the vector times 4. */
23817 if (GET_CODE (x) != PARALLEL)
23818 output_operand_lossage ("invalid %%N value");
23819 else
23820 fprintf (file, "%d", XVECLEN (x, 0) * 4);
23821 return;
23823 case 'O':
23824 /* Similar, but subtract 1 first. */
23825 if (GET_CODE (x) != PARALLEL)
23826 output_operand_lossage ("invalid %%O value");
23827 else
23828 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
23829 return;
23831 case 'p':
23832 /* X is a CONST_INT that is a power of two. Output the logarithm. */
23833 if (! INT_P (x)
23834 || INTVAL (x) < 0
23835 || (i = exact_log2 (INTVAL (x))) < 0)
23836 output_operand_lossage ("invalid %%p value");
23837 else
23838 fprintf (file, "%d", i);
23839 return;
23841 case 'P':
23842 /* The operand must be an indirect memory reference. The result
23843 is the register name. */
23844 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
23845 || REGNO (XEXP (x, 0)) >= 32)
23846 output_operand_lossage ("invalid %%P value");
23847 else
23848 fputs (reg_names[REGNO (XEXP (x, 0))], file);
23849 return;
23851 case 'q':
23852 /* This outputs the logical code corresponding to a boolean
23853 expression. The expression may have one or both operands
23854 negated (if one, only the first one). For condition register
23855 logical operations, it will also treat the negated
23856 CR codes as NOTs, but not handle NOTs of them. */
23858 const char *const *t = 0;
23859 const char *s;
23860 enum rtx_code code = GET_CODE (x);
23861 static const char * const tbl[3][3] = {
23862 { "and", "andc", "nor" },
23863 { "or", "orc", "nand" },
23864 { "xor", "eqv", "xor" } };
23866 if (code == AND)
23867 t = tbl[0];
23868 else if (code == IOR)
23869 t = tbl[1];
23870 else if (code == XOR)
23871 t = tbl[2];
23872 else
23873 output_operand_lossage ("invalid %%q value");
23875 if (GET_CODE (XEXP (x, 0)) != NOT)
23876 s = t[0];
23877 else
23879 if (GET_CODE (XEXP (x, 1)) == NOT)
23880 s = t[2];
23881 else
23882 s = t[1];
23885 fputs (s, file);
23887 return;
23889 case 'Q':
23890 if (! TARGET_MFCRF)
23891 return;
23892 fputc (',', file);
23893 /* FALLTHRU */
23895 case 'R':
23896 /* X is a CR register. Print the mask for `mtcrf'. */
23897 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23898 output_operand_lossage ("invalid %%R value");
23899 else
23900 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
23901 return;
23903 case 's':
23904 /* Low 5 bits of 32 - value */
23905 if (! INT_P (x))
23906 output_operand_lossage ("invalid %%s value");
23907 else
23908 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
23909 return;
23911 case 't':
23912 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
23913 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
23915 /* Bit 3 is OV bit. */
23916 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
23918 /* If we want bit 31, write a shift count of zero, not 32. */
23919 fprintf (file, "%d", i == 31 ? 0 : i + 1);
23920 return;
23922 case 'T':
23923 /* Print the symbolic name of a branch target register. */
23924 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
23925 && REGNO (x) != CTR_REGNO))
23926 output_operand_lossage ("invalid %%T value");
23927 else if (REGNO (x) == LR_REGNO)
23928 fputs ("lr", file);
23929 else
23930 fputs ("ctr", file);
23931 return;
23933 case 'u':
23934 /* High-order or low-order 16 bits of constant, whichever is non-zero,
23935 for use in unsigned operand. */
23936 if (! INT_P (x))
23938 output_operand_lossage ("invalid %%u value");
23939 return;
23942 uval = INTVAL (x);
23943 if ((uval & 0xffff) == 0)
23944 uval >>= 16;
23946 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
23947 return;
23949 case 'v':
23950 /* High-order 16 bits of constant for use in signed operand. */
23951 if (! INT_P (x))
23952 output_operand_lossage ("invalid %%v value");
23953 else
23954 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
23955 (INTVAL (x) >> 16) & 0xffff);
23956 return;
23958 case 'U':
23959 /* Print `u' if this has an auto-increment or auto-decrement. */
23960 if (MEM_P (x)
23961 && (GET_CODE (XEXP (x, 0)) == PRE_INC
23962 || GET_CODE (XEXP (x, 0)) == PRE_DEC
23963 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
23964 putc ('u', file);
23965 return;
23967 case 'V':
23968 /* Print the trap code for this operand. */
23969 switch (GET_CODE (x))
23971 case EQ:
23972 fputs ("eq", file); /* 4 */
23973 break;
23974 case NE:
23975 fputs ("ne", file); /* 24 */
23976 break;
23977 case LT:
23978 fputs ("lt", file); /* 16 */
23979 break;
23980 case LE:
23981 fputs ("le", file); /* 20 */
23982 break;
23983 case GT:
23984 fputs ("gt", file); /* 8 */
23985 break;
23986 case GE:
23987 fputs ("ge", file); /* 12 */
23988 break;
23989 case LTU:
23990 fputs ("llt", file); /* 2 */
23991 break;
23992 case LEU:
23993 fputs ("lle", file); /* 6 */
23994 break;
23995 case GTU:
23996 fputs ("lgt", file); /* 1 */
23997 break;
23998 case GEU:
23999 fputs ("lge", file); /* 5 */
24000 break;
24001 default:
24002 gcc_unreachable ();
24004 break;
24006 case 'w':
24007 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
24008 normally. */
24009 if (INT_P (x))
24010 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
24011 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
24012 else
24013 print_operand (file, x, 0);
24014 return;
24016 case 'x':
24017 /* X is a FPR or Altivec register used in a VSX context. */
24018 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
24019 output_operand_lossage ("invalid %%x value");
24020 else
24022 int reg = REGNO (x);
24023 int vsx_reg = (FP_REGNO_P (reg)
24024 ? reg - 32
24025 : reg - FIRST_ALTIVEC_REGNO + 32);
24027 #ifdef TARGET_REGNAMES
24028 if (TARGET_REGNAMES)
24029 fprintf (file, "%%vs%d", vsx_reg);
24030 else
24031 #endif
24032 fprintf (file, "%d", vsx_reg);
24034 return;
24036 case 'X':
24037 if (MEM_P (x)
24038 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
24039 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
24040 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
24041 putc ('x', file);
24042 return;
24044 case 'Y':
24045 /* Like 'L', for third word of TImode/PTImode */
24046 if (REG_P (x))
24047 fputs (reg_names[REGNO (x) + 2], file);
24048 else if (MEM_P (x))
24050 machine_mode mode = GET_MODE (x);
24051 if (GET_CODE (XEXP (x, 0)) == PRE_INC
24052 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
24053 output_address (mode, plus_constant (Pmode,
24054 XEXP (XEXP (x, 0), 0), 8));
24055 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24056 output_address (mode, plus_constant (Pmode,
24057 XEXP (XEXP (x, 0), 0), 8));
24058 else
24059 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
24060 if (small_data_operand (x, GET_MODE (x)))
24061 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24062 reg_names[SMALL_DATA_REG]);
24064 return;
24066 case 'z':
24067 /* X is a SYMBOL_REF. Write out the name preceded by a
24068 period and without any trailing data in brackets. Used for function
24069 names. If we are configured for System V (or the embedded ABI) on
24070 the PowerPC, do not emit the period, since those systems do not use
24071 TOCs and the like. */
24072 gcc_assert (GET_CODE (x) == SYMBOL_REF);
24074 /* For macho, check to see if we need a stub. */
24075 if (TARGET_MACHO)
24077 const char *name = XSTR (x, 0);
24078 #if TARGET_MACHO
24079 if (darwin_emit_branch_islands
24080 && MACHOPIC_INDIRECT
24081 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
24082 name = machopic_indirection_name (x, /*stub_p=*/true);
24083 #endif
24084 assemble_name (file, name);
24086 else if (!DOT_SYMBOLS)
24087 assemble_name (file, XSTR (x, 0));
24088 else
24089 rs6000_output_function_entry (file, XSTR (x, 0));
24090 return;
24092 case 'Z':
24093 /* Like 'L', for last word of TImode/PTImode. */
24094 if (REG_P (x))
24095 fputs (reg_names[REGNO (x) + 3], file);
24096 else if (MEM_P (x))
24098 machine_mode mode = GET_MODE (x);
24099 if (GET_CODE (XEXP (x, 0)) == PRE_INC
24100 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
24101 output_address (mode, plus_constant (Pmode,
24102 XEXP (XEXP (x, 0), 0), 12));
24103 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24104 output_address (mode, plus_constant (Pmode,
24105 XEXP (XEXP (x, 0), 0), 12));
24106 else
24107 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
24108 if (small_data_operand (x, GET_MODE (x)))
24109 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24110 reg_names[SMALL_DATA_REG]);
24112 return;
24114 /* Print AltiVec or SPE memory operand. */
24115 case 'y':
24117 rtx tmp;
24119 gcc_assert (MEM_P (x));
24121 tmp = XEXP (x, 0);
24123 /* Ugly hack because %y is overloaded. */
24124 if ((TARGET_SPE || TARGET_E500_DOUBLE)
24125 && (GET_MODE_SIZE (GET_MODE (x)) == 8
24126 || FLOAT128_2REG_P (GET_MODE (x))
24127 || GET_MODE (x) == TImode
24128 || GET_MODE (x) == PTImode))
24130 /* Handle [reg]. */
24131 if (REG_P (tmp))
24133 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
24134 break;
24136 /* Handle [reg+UIMM]. */
24137 else if (GET_CODE (tmp) == PLUS &&
24138 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
24140 int x;
24142 gcc_assert (REG_P (XEXP (tmp, 0)));
24144 x = INTVAL (XEXP (tmp, 1));
24145 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
24146 break;
24149 /* Fall through. Must be [reg+reg]. */
24151 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
24152 && GET_CODE (tmp) == AND
24153 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
24154 && INTVAL (XEXP (tmp, 1)) == -16)
24155 tmp = XEXP (tmp, 0);
24156 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
24157 && GET_CODE (tmp) == PRE_MODIFY)
24158 tmp = XEXP (tmp, 1);
24159 if (REG_P (tmp))
24160 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
24161 else
24163 if (GET_CODE (tmp) != PLUS
24164 || !REG_P (XEXP (tmp, 0))
24165 || !REG_P (XEXP (tmp, 1)))
24167 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
24168 break;
24171 if (REGNO (XEXP (tmp, 0)) == 0)
24172 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
24173 reg_names[ REGNO (XEXP (tmp, 0)) ]);
24174 else
24175 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
24176 reg_names[ REGNO (XEXP (tmp, 1)) ]);
24178 break;
24181 case 0:
24182 if (REG_P (x))
24183 fprintf (file, "%s", reg_names[REGNO (x)]);
24184 else if (MEM_P (x))
24186 /* We need to handle PRE_INC and PRE_DEC here, since we need to
24187 know the width from the mode. */
24188 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
24189 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
24190 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
24191 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
24192 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
24193 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
24194 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
24195 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
24196 else
24197 output_address (GET_MODE (x), XEXP (x, 0));
24199 else
24201 if (toc_relative_expr_p (x, false))
24202 /* This hack along with a corresponding hack in
24203 rs6000_output_addr_const_extra arranges to output addends
24204 where the assembler expects to find them. eg.
24205 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
24206 without this hack would be output as "x@toc+4". We
24207 want "x+4@toc". */
24208 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
24209 else
24210 output_addr_const (file, x);
24212 return;
24214 case '&':
24215 if (const char *name = get_some_local_dynamic_name ())
24216 assemble_name (file, name);
24217 else
24218 output_operand_lossage ("'%%&' used without any "
24219 "local dynamic TLS references");
24220 return;
24222 default:
24223 output_operand_lossage ("invalid %%xn code");
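/* Standalone sketch of two bit tricks from print_operand above (not
   part of this port; compiled out with #if 0): the %w sign-extension of
   the low 16 bits, and the %R single-field mtcrf mask.  */
#if 0
#include <stdio.h>

static long
sketch_low16_signed (long v)
{
  /* Keep the low 16 bits, then sign-extend them portably.  */
  return ((v & 0xffff) ^ 0x8000) - 0x8000;
}

static int
sketch_mtcrf_mask (int cr_field)
{
  /* mtcrf takes an 8-bit field mask; CR0 is the high bit.  */
  return 128 >> cr_field;
}

int
main (void)
{
  printf ("%ld\n", sketch_low16_signed (0x18000L));	/* -32768 */
  printf ("%ld\n", sketch_low16_signed (0x17fffL));	/* 32767 */
  printf ("0x%02x\n", sketch_mtcrf_mask (1));		/* 0x40 for CR1 */
  return 0;
}
#endif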
24227 /* Print the address of an operand. */
24229 void
24230 print_operand_address (FILE *file, rtx x)
24232 if (REG_P (x))
24233 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
24234 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
24235 || GET_CODE (x) == LABEL_REF)
24237 output_addr_const (file, x);
24238 if (small_data_operand (x, GET_MODE (x)))
24239 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
24240 reg_names[SMALL_DATA_REG]);
24241 else
24242 gcc_assert (!TARGET_TOC);
24244 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
24245 && REG_P (XEXP (x, 1)))
24247 if (REGNO (XEXP (x, 0)) == 0)
24248 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
24249 reg_names[ REGNO (XEXP (x, 0)) ]);
24250 else
24251 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
24252 reg_names[ REGNO (XEXP (x, 1)) ]);
24254 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
24255 && GET_CODE (XEXP (x, 1)) == CONST_INT)
24256 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
24257 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
24258 #if TARGET_MACHO
24259 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
24260 && CONSTANT_P (XEXP (x, 1)))
24262 fprintf (file, "lo16(");
24263 output_addr_const (file, XEXP (x, 1));
24264 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
24266 #endif
24267 #if TARGET_ELF
24268 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
24269 && CONSTANT_P (XEXP (x, 1)))
24271 output_addr_const (file, XEXP (x, 1));
24272 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
24274 #endif
24275 else if (toc_relative_expr_p (x, false))
24277 /* This hack along with a corresponding hack in
24278 rs6000_output_addr_const_extra arranges to output addends
24279 where the assembler expects to find them. eg.
24280 (lo_sum (reg 9)
24281 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
24282 without this hack would be output as "x@toc+8@l(9)". We
24283 want "x+8@toc@l(9)". */
24284 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
24285 if (GET_CODE (x) == LO_SUM)
24286 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
24287 else
24288 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
24290 else
24291 gcc_unreachable ();
24294 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
24296 static bool
24297 rs6000_output_addr_const_extra (FILE *file, rtx x)
24299 if (GET_CODE (x) == UNSPEC)
24300 switch (XINT (x, 1))
24302 case UNSPEC_TOCREL:
24303 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
24304 && REG_P (XVECEXP (x, 0, 1))
24305 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
24306 output_addr_const (file, XVECEXP (x, 0, 0));
24307 if (x == tocrel_base && tocrel_offset != const0_rtx)
24309 if (INTVAL (tocrel_offset) >= 0)
24310 fprintf (file, "+");
24311 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
24313 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
24315 putc ('-', file);
24316 assemble_name (file, toc_label_name);
24317 need_toc_init = 1;
24319 else if (TARGET_ELF)
24320 fputs ("@toc", file);
24321 return true;
24323 #if TARGET_MACHO
24324 case UNSPEC_MACHOPIC_OFFSET:
24325 output_addr_const (file, XVECEXP (x, 0, 0));
24326 putc ('-', file);
24327 machopic_output_function_base_name (file);
24328 return true;
24329 #endif
24331 return false;
24334 /* Target hook for assembling integer objects. The PowerPC version has
24335 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
24336 is defined. It also needs to handle DI-mode objects on 64-bit
24337 targets. */
24339 static bool
24340 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
24342 #ifdef RELOCATABLE_NEEDS_FIXUP
24343 /* Special handling for SI values. */
24344 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
24346 static int recurse = 0;
24348 /* For -mrelocatable, we mark all addresses that need to be fixed up in
24349 the .fixup section. Since the TOC section is already relocated, we
24350 don't need to mark it here. We used to skip the text section, but it
24351 should never be valid for relocated addresses to be placed in the text
24352 section. */
24353 if (DEFAULT_ABI == ABI_V4
24354 && (TARGET_RELOCATABLE || flag_pic > 1)
24355 && in_section != toc_section
24356 && !recurse
24357 && !CONST_SCALAR_INT_P (x)
24358 && CONSTANT_P (x))
24360 char buf[256];
24362 recurse = 1;
24363 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
24364 fixuplabelno++;
24365 ASM_OUTPUT_LABEL (asm_out_file, buf);
24366 fprintf (asm_out_file, "\t.long\t(");
24367 output_addr_const (asm_out_file, x);
24368 fprintf (asm_out_file, ")@fixup\n");
24369 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
24370 ASM_OUTPUT_ALIGN (asm_out_file, 2);
24371 fprintf (asm_out_file, "\t.long\t");
24372 assemble_name (asm_out_file, buf);
24373 fprintf (asm_out_file, "\n\t.previous\n");
24374 recurse = 0;
24375 return true;
24377 /* Remove initial .'s to turn a -mcall-aixdesc function
24378 address into the address of the descriptor, not the function
24379 itself. */
24380 else if (GET_CODE (x) == SYMBOL_REF
24381 && XSTR (x, 0)[0] == '.'
24382 && DEFAULT_ABI == ABI_AIX)
24384 const char *name = XSTR (x, 0);
24385 while (*name == '.')
24386 name++;
24388 fprintf (asm_out_file, "\t.long\t%s\n", name);
24389 return true;
24392 #endif /* RELOCATABLE_NEEDS_FIXUP */
24393 return default_assemble_integer (x, size, aligned_p);
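/* Standalone sketch of the -mrelocatable fixup stream emitted above
   (not part of this port; the ".LCP0" label and the .align spelling are
   hypothetical, compiled out with #if 0).  Each relocatable word gets a
   label, and the .fixup section records that label so startup code can
   relocate the word.  */
#if 0
#include <stdio.h>

static void
sketch_emit_fixup (FILE *out, const char *label, const char *addr)
{
  fprintf (out, "%s:\n", label);
  fprintf (out, "\t.long\t(%s)@fixup\n", addr);
  fprintf (out, "\t.section\t\".fixup\",\"aw\"\n");
  fprintf (out, "\t.align\t2\n");
  fprintf (out, "\t.long\t%s\n", label);
  fprintf (out, "\t.previous\n");
}

int
main (void)
{
  sketch_emit_fixup (stdout, ".LCP0", "some_symbol");
  return 0;
}
#endif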
24396 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
24397 /* Emit an assembler directive to set symbol visibility for DECL to
24398 VISIBILITY_TYPE. */
24400 static void
24401 rs6000_assemble_visibility (tree decl, int vis)
24403 if (TARGET_XCOFF)
24404 return;
24406 /* Functions need to have their entry point symbol visibility set as
24407 well as their descriptor symbol visibility. */
24408 if (DEFAULT_ABI == ABI_AIX
24409 && DOT_SYMBOLS
24410 && TREE_CODE (decl) == FUNCTION_DECL)
24412 static const char * const visibility_types[] = {
24413 NULL, "protected", "hidden", "internal"
24416 const char *name, *type;
24418 name = ((* targetm.strip_name_encoding)
24419 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
24420 type = visibility_types[vis];
24422 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
24423 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
24425 else
24426 default_assemble_visibility (decl, vis);
24428 #endif
24430 enum rtx_code
24431 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
24433 /* Reversal of FP compares takes care -- an ordered compare
24434 becomes an unordered compare and vice versa. */
24435 if (mode == CCFPmode
24436 && (!flag_finite_math_only
24437 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
24438 || code == UNEQ || code == LTGT))
24439 return reverse_condition_maybe_unordered (code);
24440 else
24441 return reverse_condition (code);
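/* Standalone sketch of the reversal above (not part of this port;
   compiled out with #if 0).  With possible NaNs an ordered compare
   reverses to an unordered one: !(a >= b) is a < b only when the
   operands are ordered, and UNLT otherwise.  */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *
sketch_reverse (const char *code, bool maybe_nan)
{
  if (!strcmp (code, "ge"))
    return maybe_nan ? "unlt" : "lt";
  if (!strcmp (code, "gt"))
    return maybe_nan ? "unle" : "le";
  if (!strcmp (code, "eq"))
    return "ne";
  return "?";
}

int
main (void)
{
  printf ("%s\n", sketch_reverse ("ge", false));	/* lt */
  printf ("%s\n", sketch_reverse ("ge", true));		/* unlt */
  return 0;
}
#endif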
24444 /* Generate a compare for CODE. Return a brand-new rtx that
24445 represents the result of the compare. */
24447 static rtx
24448 rs6000_generate_compare (rtx cmp, machine_mode mode)
24450 machine_mode comp_mode;
24451 rtx compare_result;
24452 enum rtx_code code = GET_CODE (cmp);
24453 rtx op0 = XEXP (cmp, 0);
24454 rtx op1 = XEXP (cmp, 1);
24456 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
24457 comp_mode = CCmode;
24458 else if (FLOAT_MODE_P (mode))
24459 comp_mode = CCFPmode;
24460 else if (code == GTU || code == LTU
24461 || code == GEU || code == LEU)
24462 comp_mode = CCUNSmode;
24463 else if ((code == EQ || code == NE)
24464 && unsigned_reg_p (op0)
24465 && (unsigned_reg_p (op1)
24466 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
24467 /* These are unsigned values, perhaps there will be a later
24468 ordering compare that can be shared with this one. */
24469 comp_mode = CCUNSmode;
24470 else
24471 comp_mode = CCmode;
24473 /* If we have an unsigned compare, make sure we don't have a signed value as
24474 an immediate. */
24475 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
24476 && INTVAL (op1) < 0)
24478 op0 = copy_rtx_if_shared (op0);
24479 op1 = force_reg (GET_MODE (op0), op1);
24480 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
24483 /* First, the compare. */
24484 compare_result = gen_reg_rtx (comp_mode);
24486 /* E500 FP compare instructions on the GPRs. Yuck! */
24487 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
24488 && FLOAT_MODE_P (mode))
24490 rtx cmp, or_result, compare_result2;
24491 machine_mode op_mode = GET_MODE (op0);
24492 bool reverse_p;
24494 if (op_mode == VOIDmode)
24495 op_mode = GET_MODE (op1);
24497 /* First reverse the condition codes that aren't directly supported. */
24498 switch (code)
24500 case NE:
24501 case UNLT:
24502 case UNLE:
24503 case UNGT:
24504 case UNGE:
24505 code = reverse_condition_maybe_unordered (code);
24506 reverse_p = true;
24507 break;
24509 case EQ:
24510 case LT:
24511 case LE:
24512 case GT:
24513 case GE:
24514 reverse_p = false;
24515 break;
24517 default:
24518 gcc_unreachable ();
24521 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
24522 This explains the following mess. */
24524 switch (code)
24526 case EQ:
24527 switch (op_mode)
24529 case SFmode:
24530 cmp = (flag_finite_math_only && !flag_trapping_math)
24531 ? gen_tstsfeq_gpr (compare_result, op0, op1)
24532 : gen_cmpsfeq_gpr (compare_result, op0, op1);
24533 break;
24535 case DFmode:
24536 cmp = (flag_finite_math_only && !flag_trapping_math)
24537 ? gen_tstdfeq_gpr (compare_result, op0, op1)
24538 : gen_cmpdfeq_gpr (compare_result, op0, op1);
24539 break;
24541 case TFmode:
24542 case IFmode:
24543 case KFmode:
24544 cmp = (flag_finite_math_only && !flag_trapping_math)
24545 ? gen_tsttfeq_gpr (compare_result, op0, op1)
24546 : gen_cmptfeq_gpr (compare_result, op0, op1);
24547 break;
24549 default:
24550 gcc_unreachable ();
24552 break;
24554 case GT:
24555 case GE:
24556 switch (op_mode)
24558 case SFmode:
24559 cmp = (flag_finite_math_only && !flag_trapping_math)
24560 ? gen_tstsfgt_gpr (compare_result, op0, op1)
24561 : gen_cmpsfgt_gpr (compare_result, op0, op1);
24562 break;
24564 case DFmode:
24565 cmp = (flag_finite_math_only && !flag_trapping_math)
24566 ? gen_tstdfgt_gpr (compare_result, op0, op1)
24567 : gen_cmpdfgt_gpr (compare_result, op0, op1);
24568 break;
24570 case TFmode:
24571 case IFmode:
24572 case KFmode:
24573 cmp = (flag_finite_math_only && !flag_trapping_math)
24574 ? gen_tsttfgt_gpr (compare_result, op0, op1)
24575 : gen_cmptfgt_gpr (compare_result, op0, op1);
24576 break;
24578 default:
24579 gcc_unreachable ();
24581 break;
24583 case LT:
24584 case LE:
24585 switch (op_mode)
24587 case SFmode:
24588 cmp = (flag_finite_math_only && !flag_trapping_math)
24589 ? gen_tstsflt_gpr (compare_result, op0, op1)
24590 : gen_cmpsflt_gpr (compare_result, op0, op1);
24591 break;
24593 case DFmode:
24594 cmp = (flag_finite_math_only && !flag_trapping_math)
24595 ? gen_tstdflt_gpr (compare_result, op0, op1)
24596 : gen_cmpdflt_gpr (compare_result, op0, op1);
24597 break;
24599 case TFmode:
24600 case IFmode:
24601 case KFmode:
24602 cmp = (flag_finite_math_only && !flag_trapping_math)
24603 ? gen_tsttflt_gpr (compare_result, op0, op1)
24604 : gen_cmptflt_gpr (compare_result, op0, op1);
24605 break;
24607 default:
24608 gcc_unreachable ();
24610 break;
24612 default:
24613 gcc_unreachable ();
24616 /* Synthesize LE and GE from LT/GT || EQ. */
24617 if (code == LE || code == GE)
24619 emit_insn (cmp);
24621 compare_result2 = gen_reg_rtx (CCFPmode);
24623 /* Do the EQ. */
24624 switch (op_mode)
24626 case SFmode:
24627 cmp = (flag_finite_math_only && !flag_trapping_math)
24628 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
24629 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
24630 break;
24632 case DFmode:
24633 cmp = (flag_finite_math_only && !flag_trapping_math)
24634 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
24635 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
24636 break;
24638 case TFmode:
24639 case IFmode:
24640 case KFmode:
24641 cmp = (flag_finite_math_only && !flag_trapping_math)
24642 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
24643 : gen_cmptfeq_gpr (compare_result2, op0, op1);
24644 break;
24646 default:
24647 gcc_unreachable ();
24650 emit_insn (cmp);
24652 /* OR them together. */
24653 or_result = gen_reg_rtx (CCFPmode);
24654 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
24655 compare_result2);
24656 compare_result = or_result;
24659 code = reverse_p ? NE : EQ;
24661 emit_insn (cmp);
24664 /* IEEE 128-bit support in VSX registers when we do not have hardware
24665 support. */
24666 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
24668 rtx libfunc = NULL_RTX;
24669 bool check_nan = false;
24670 rtx dest;
24672 switch (code)
24674 case EQ:
24675 case NE:
24676 libfunc = optab_libfunc (eq_optab, mode);
24677 break;
24679 case GT:
24680 case GE:
24681 libfunc = optab_libfunc (ge_optab, mode);
24682 break;
24684 case LT:
24685 case LE:
24686 libfunc = optab_libfunc (le_optab, mode);
24687 break;
24689 case UNORDERED:
24690 case ORDERED:
24691 libfunc = optab_libfunc (unord_optab, mode);
24692 code = (code == UNORDERED) ? NE : EQ;
24693 break;
24695 case UNGE:
24696 case UNGT:
24697 check_nan = true;
24698 libfunc = optab_libfunc (ge_optab, mode);
24699 code = (code == UNGE) ? GE : GT;
24700 break;
24702 case UNLE:
24703 case UNLT:
24704 check_nan = true;
24705 libfunc = optab_libfunc (le_optab, mode);
24706 code = (code == UNLE) ? LE : LT;
24707 break;
24709 case UNEQ:
24710 case LTGT:
24711 check_nan = true;
24712 libfunc = optab_libfunc (eq_optab, mode);
24713 	  code = (code == UNEQ) ? EQ : NE;
24714 break;
24716 default:
24717 gcc_unreachable ();
24720 gcc_assert (libfunc);
24722 if (!check_nan)
24723 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
24724 SImode, 2, op0, mode, op1, mode);
24726 /* The library signals an exception for signalling NaNs, so we need to
24727 handle isgreater, etc. by first checking isordered. */
24728 else
24730 rtx ne_rtx, normal_dest, unord_dest;
24731 rtx unord_func = optab_libfunc (unord_optab, mode);
24732 rtx join_label = gen_label_rtx ();
24733 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
24734 rtx unord_cmp = gen_reg_rtx (comp_mode);
24737 /* Test for either value being a NaN. */
24738 gcc_assert (unord_func);
24739 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
24740 SImode, 2, op0, mode, op1,
24741 mode);
24743 	  /* Set the value (1) if either value is a NaN, and jump to the join
24744 	     label.  */
24745 dest = gen_reg_rtx (SImode);
24746 emit_move_insn (dest, const1_rtx);
24747 emit_insn (gen_rtx_SET (unord_cmp,
24748 gen_rtx_COMPARE (comp_mode, unord_dest,
24749 const0_rtx)));
24751 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
24752 emit_jump_insn (gen_rtx_SET (pc_rtx,
24753 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
24754 join_ref,
24755 pc_rtx)));
24757 /* Do the normal comparison, knowing that the values are not
24758 NaNs. */
24759 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
24760 SImode, 2, op0, mode, op1,
24761 mode);
24763 emit_insn (gen_cstoresi4 (dest,
24764 gen_rtx_fmt_ee (code, SImode, normal_dest,
24765 const0_rtx),
24766 normal_dest, const0_rtx));
24768 	  /* Join the NaN and non-NaN paths.  Compare dest against 0.  */
24769 emit_label (join_label);
24770 code = NE;
24773 emit_insn (gen_rtx_SET (compare_result,
24774 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
24777 else
24779 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
24780 CLOBBERs to match cmptf_internal2 pattern. */
24781 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
24782 && FLOAT128_IBM_P (GET_MODE (op0))
24783 && TARGET_HARD_FLOAT && TARGET_FPRS)
24784 emit_insn (gen_rtx_PARALLEL (VOIDmode,
24785 gen_rtvec (10,
24786 gen_rtx_SET (compare_result,
24787 gen_rtx_COMPARE (comp_mode, op0, op1)),
24788 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24789 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24790 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24791 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24792 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24793 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24794 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24795 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
24796 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
24797 else if (GET_CODE (op1) == UNSPEC
24798 && XINT (op1, 1) == UNSPEC_SP_TEST)
24800 rtx op1b = XVECEXP (op1, 0, 0);
24801 comp_mode = CCEQmode;
24802 compare_result = gen_reg_rtx (CCEQmode);
24803 if (TARGET_64BIT)
24804 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
24805 else
24806 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
24808 else
24809 emit_insn (gen_rtx_SET (compare_result,
24810 gen_rtx_COMPARE (comp_mode, op0, op1)));
24813 /* Some kinds of FP comparisons need an OR operation;
24814 under flag_finite_math_only we don't bother. */
24815 if (FLOAT_MODE_P (mode)
24816 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
24817 && !flag_finite_math_only
24818 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
24819 && (code == LE || code == GE
24820 || code == UNEQ || code == LTGT
24821 || code == UNGT || code == UNLT))
24823 enum rtx_code or1, or2;
24824 rtx or1_rtx, or2_rtx, compare2_rtx;
24825 rtx or_result = gen_reg_rtx (CCEQmode);
24827 switch (code)
24829 case LE: or1 = LT; or2 = EQ; break;
24830 case GE: or1 = GT; or2 = EQ; break;
24831 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
24832 case LTGT: or1 = LT; or2 = GT; break;
24833 case UNGT: or1 = UNORDERED; or2 = GT; break;
24834 case UNLT: or1 = UNORDERED; or2 = LT; break;
24835 default: gcc_unreachable ();
24837 validate_condition_mode (or1, comp_mode);
24838 validate_condition_mode (or2, comp_mode);
24839 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
24840 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
24841 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
24842 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
24843 const_true_rtx);
24844 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
24846 compare_result = or_result;
24847 code = EQ;
24850 validate_condition_mode (code, GET_MODE (compare_result));
24852 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
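/* Standalone sketch of the CR-bit OR synthesis above (not part of this
   port; compiled out with #if 0).  A hardware FP compare yields four
   bits (LT, GT, EQ, UN); LE, GE, LTGT, UNGT, UNLT and UNEQ are each the
   OR of two of them, which is what the CCEQmode IOR pattern computes.  */
#if 0
#include <math.h>
#include <stdbool.h>
#include <stdio.h>

static void
sketch_fp_cr_bits (double a, double b, bool *lt, bool *gt, bool *eq,
		   bool *un)
{
  *un = isnan (a) || isnan (b);
  *lt = !*un && a < b;
  *gt = !*un && a > b;
  *eq = !*un && a == b;
}

int
main (void)
{
  bool lt, gt, eq, un;
  sketch_fp_cr_bits (1.0, NAN, &lt, &gt, &eq, &un);
  printf ("le   = %d\n", lt || eq);	/* 0: a NaN makes LE false */
  printf ("unlt = %d\n", un || lt);	/* 1: UNLT is UNORDERED || LT */
  return 0;
}
#endif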
24856 /* Return the diagnostic message string if the binary operation OP is
24857 not permitted on TYPE1 and TYPE2, NULL otherwise. */
24859 static const char*
24860 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
24861 const_tree type1,
24862 const_tree type2)
24864 machine_mode mode1 = TYPE_MODE (type1);
24865 machine_mode mode2 = TYPE_MODE (type2);
24867 /* For complex modes, use the inner type. */
24868 if (COMPLEX_MODE_P (mode1))
24869 mode1 = GET_MODE_INNER (mode1);
24871 if (COMPLEX_MODE_P (mode2))
24872 mode2 = GET_MODE_INNER (mode2);
24874 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
24875 double to intermix unless -mfloat128-convert. */
24876 if (mode1 == mode2)
24877 return NULL;
24879 if (!TARGET_FLOAT128_CVT)
24881 if ((mode1 == KFmode && mode2 == IFmode)
24882 || (mode1 == IFmode && mode2 == KFmode))
24883 return N_("__float128 and __ibm128 cannot be used in the same "
24884 "expression");
24886 if (TARGET_IEEEQUAD
24887 && ((mode1 == IFmode && mode2 == TFmode)
24888 || (mode1 == TFmode && mode2 == IFmode)))
24889 return N_("__ibm128 and long double cannot be used in the same "
24890 "expression");
24892 if (!TARGET_IEEEQUAD
24893 && ((mode1 == KFmode && mode2 == TFmode)
24894 || (mode1 == TFmode && mode2 == KFmode)))
24895 return N_("__float128 and long double cannot be used in the same "
24896 "expression");
24899 return NULL;
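/* Standalone sketch of the pair check above (not part of this port;
   sketch names are hypothetical, compiled out with #if 0).  SK_KF is
   __float128, SK_IF is __ibm128, SK_TF is whatever long double
   currently is; mixing is rejected unless -mfloat128-convert permits
   implicit conversions.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

enum sketch_mode { SK_KF, SK_IF, SK_TF };

static const char *
sketch_invalid_pair (enum sketch_mode m1, enum sketch_mode m2,
		     bool ieeequad, bool float128_cvt)
{
  if (m1 == m2 || float128_cvt)
    return NULL;
  if ((m1 == SK_KF && m2 == SK_IF) || (m1 == SK_IF && m2 == SK_KF))
    return "__float128 + __ibm128";
  if (ieeequad
      && ((m1 == SK_IF && m2 == SK_TF) || (m1 == SK_TF && m2 == SK_IF)))
    return "__ibm128 + long double";
  if (!ieeequad
      && ((m1 == SK_KF && m2 == SK_TF) || (m1 == SK_TF && m2 == SK_KF)))
    return "__float128 + long double";
  return NULL;
}

int
main (void)
{
  const char *msg = sketch_invalid_pair (SK_KF, SK_IF, false, false);
  printf ("%s\n", msg ? msg : "ok");	/* __float128 + __ibm128 */
  return 0;
}
#endif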
24903 /* Expand floating point conversion to/from __float128 and __ibm128. */
24905 void
24906 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
24908 machine_mode dest_mode = GET_MODE (dest);
24909 machine_mode src_mode = GET_MODE (src);
24910 convert_optab cvt = unknown_optab;
24911 bool do_move = false;
24912 rtx libfunc = NULL_RTX;
24913 rtx dest2;
24914 typedef rtx (*rtx_2func_t) (rtx, rtx);
24915 rtx_2func_t hw_convert = (rtx_2func_t)0;
24916 size_t kf_or_tf;
24918 struct hw_conv_t {
24919 rtx_2func_t from_df;
24920 rtx_2func_t from_sf;
24921 rtx_2func_t from_si_sign;
24922 rtx_2func_t from_si_uns;
24923 rtx_2func_t from_di_sign;
24924 rtx_2func_t from_di_uns;
24925 rtx_2func_t to_df;
24926 rtx_2func_t to_sf;
24927 rtx_2func_t to_si_sign;
24928 rtx_2func_t to_si_uns;
24929 rtx_2func_t to_di_sign;
24930 rtx_2func_t to_di_uns;
24931 } hw_conversions[2] = {
24932     /* conversions to/from KFmode */
24934 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
24935 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
24936 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
24937 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
24938 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
24939 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
24940 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
24941 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
24942 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
24943 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
24944 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
24945 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
24948     /* conversions to/from TFmode */
24950 gen_extenddftf2_hw, /* TFmode <- DFmode. */
24951 gen_extendsftf2_hw, /* TFmode <- SFmode. */
24952 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
24953 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
24954 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
24955 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
24956 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
24957 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
24958 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
24959 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
24960 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
24961 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
24965 if (dest_mode == src_mode)
24966 gcc_unreachable ();
24968 /* Eliminate memory operations. */
24969 if (MEM_P (src))
24970 src = force_reg (src_mode, src);
24972 if (MEM_P (dest))
24974 rtx tmp = gen_reg_rtx (dest_mode);
24975 rs6000_expand_float128_convert (tmp, src, unsigned_p);
24976 rs6000_emit_move (dest, tmp, dest_mode);
24977 return;
24980 /* Convert to IEEE 128-bit floating point. */
24981 if (FLOAT128_IEEE_P (dest_mode))
24983 if (dest_mode == KFmode)
24984 kf_or_tf = 0;
24985 else if (dest_mode == TFmode)
24986 kf_or_tf = 1;
24987 else
24988 gcc_unreachable ();
24990 switch (src_mode)
24992 case DFmode:
24993 cvt = sext_optab;
24994 hw_convert = hw_conversions[kf_or_tf].from_df;
24995 break;
24997 case SFmode:
24998 cvt = sext_optab;
24999 hw_convert = hw_conversions[kf_or_tf].from_sf;
25000 break;
25002 case KFmode:
25003 case IFmode:
25004 case TFmode:
25005 if (FLOAT128_IBM_P (src_mode))
25006 cvt = sext_optab;
25007 else
25008 do_move = true;
25009 break;
25011 case SImode:
25012 if (unsigned_p)
25014 cvt = ufloat_optab;
25015 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
25017 else
25019 cvt = sfloat_optab;
25020 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
25022 break;
25024 case DImode:
25025 if (unsigned_p)
25027 cvt = ufloat_optab;
25028 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
25030 else
25032 cvt = sfloat_optab;
25033 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
25035 break;
25037 default:
25038 gcc_unreachable ();
25042 /* Convert from IEEE 128-bit floating point. */
25043 else if (FLOAT128_IEEE_P (src_mode))
25045 if (src_mode == KFmode)
25046 kf_or_tf = 0;
25047 else if (src_mode == TFmode)
25048 kf_or_tf = 1;
25049 else
25050 gcc_unreachable ();
25052 switch (dest_mode)
25054 case DFmode:
25055 cvt = trunc_optab;
25056 hw_convert = hw_conversions[kf_or_tf].to_df;
25057 break;
25059 case SFmode:
25060 cvt = trunc_optab;
25061 hw_convert = hw_conversions[kf_or_tf].to_sf;
25062 break;
25064 case KFmode:
25065 case IFmode:
25066 case TFmode:
25067 if (FLOAT128_IBM_P (dest_mode))
25068 cvt = trunc_optab;
25069 else
25070 do_move = true;
25071 break;
25073 case SImode:
25074 if (unsigned_p)
25076 cvt = ufix_optab;
25077 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
25079 else
25081 cvt = sfix_optab;
25082 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
25084 break;
25086 case DImode:
25087 if (unsigned_p)
25089 cvt = ufix_optab;
25090 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
25092 else
25094 cvt = sfix_optab;
25095 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
25097 break;
25099 default:
25100 gcc_unreachable ();
25104 /* Both IBM format. */
25105 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
25106 do_move = true;
25108 else
25109 gcc_unreachable ();
25111 /* Handle conversion between TFmode/KFmode. */
25112 if (do_move)
25113 emit_move_insn (dest, gen_lowpart (dest_mode, src));
25115 /* Handle conversion if we have hardware support. */
25116 else if (TARGET_FLOAT128_HW && hw_convert)
25117 emit_insn ((hw_convert) (dest, src));
25119 /* Call an external function to do the conversion. */
25120 else if (cvt != unknown_optab)
25122 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
25123 gcc_assert (libfunc != NULL_RTX);
25125 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
25126 src_mode);
25128 gcc_assert (dest2 != NULL_RTX);
25129 if (!rtx_equal_p (dest, dest2))
25130 emit_move_insn (dest, dest2);
25133 else
25134 gcc_unreachable ();
25136 return;
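/* Standalone sketch of the three-way dispatch above (not part of this
   port; compiled out with #if 0).  A conversion is either a plain
   register move (both modes share a format), a single hardware insn
   when ISA 3.0 support is present, or a libgcc call found through the
   convert_optab tables.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static const char *
sketch_float128_strategy (bool same_format, bool hw_ok, bool have_libfunc)
{
  if (same_format)
    return "register move";
  if (hw_ok)
    return "hardware insn";
  if (have_libfunc)
    return "libgcc call";
  return "unreachable";
}

int
main (void)
{
  /* e.g. KFmode <- DFmode without hardware IEEE 128-bit support.  */
  printf ("%s\n", sketch_float128_strategy (false, false, true));
  return 0;
}
#endif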
25140 /* Emit the RTL for an sISEL pattern. */
25142 void
25143 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
25145 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
25148 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
25149 can be used as that dest register. Return the dest register. */
25152 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
25154 if (op2 == const0_rtx)
25155 return op1;
25157 if (GET_CODE (scratch) == SCRATCH)
25158 scratch = gen_reg_rtx (mode);
25160 if (logical_operand (op2, mode))
25161 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
25162 else
25163 emit_insn (gen_rtx_SET (scratch,
25164 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
25166 return scratch;
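/* Standalone sketch of the reduction above (not part of this port;
   compiled out with #if 0).  op1 == op2 exactly when op1 ^ op2 (or
   op1 - op2, when op2 does not fit a logical immediate) is zero, so the
   caller only ever compares the scratch register against zero.  */
#if 0
#include <stdio.h>

static long
sketch_eqne_xor (long op1, long op2)
{
  return op1 ^ op2;		/* usable when op2 is a logical operand */
}

static long
sketch_eqne_sub (long op1, long op2)
{
  return op1 - op2;		/* fallback: add the negation of op2 */
}

int
main (void)
{
  printf ("%d\n", sketch_eqne_xor (42, 42) == 0);	/* 1 */
  printf ("%d\n", sketch_eqne_sub (42, 7) == 0);	/* 0 */
  return 0;
}
#endif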
25169 void
25170 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
25172 rtx condition_rtx;
25173 machine_mode op_mode;
25174 enum rtx_code cond_code;
25175 rtx result = operands[0];
25177 condition_rtx = rs6000_generate_compare (operands[1], mode);
25178 cond_code = GET_CODE (condition_rtx);
25180 if (FLOAT_MODE_P (mode)
25181 && !TARGET_FPRS && TARGET_HARD_FLOAT)
25183 rtx t;
25185 PUT_MODE (condition_rtx, SImode);
25186 t = XEXP (condition_rtx, 0);
25188 gcc_assert (cond_code == NE || cond_code == EQ);
25190 if (cond_code == NE)
25191 emit_insn (gen_e500_flip_gt_bit (t, t));
25193 emit_insn (gen_move_from_CR_gt_bit (result, t));
25194 return;
25197 if (cond_code == NE
25198 || cond_code == GE || cond_code == LE
25199 || cond_code == GEU || cond_code == LEU
25200 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
25202 rtx not_result = gen_reg_rtx (CCEQmode);
25203 rtx not_op, rev_cond_rtx;
25204 machine_mode cc_mode;
25206 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
25208 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
25209 SImode, XEXP (condition_rtx, 0), const0_rtx);
25210 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
25211 emit_insn (gen_rtx_SET (not_result, not_op));
25212 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
25215 op_mode = GET_MODE (XEXP (operands[1], 0));
25216 if (op_mode == VOIDmode)
25217 op_mode = GET_MODE (XEXP (operands[1], 1));
25219 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
25221 PUT_MODE (condition_rtx, DImode);
25222 convert_move (result, condition_rtx, 0);
25224 else
25226 PUT_MODE (condition_rtx, SImode);
25227 emit_insn (gen_rtx_SET (result, condition_rtx));
25231 /* Emit a branch of kind CODE to location LOC. */
25233 void
25234 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
25236 rtx condition_rtx, loc_ref;
25238 condition_rtx = rs6000_generate_compare (operands[0], mode);
25239 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
25240 emit_jump_insn (gen_rtx_SET (pc_rtx,
25241 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
25242 loc_ref, pc_rtx)));
25245 /* Return the string to output a conditional branch to LABEL, which is
25246 the operand template of the label, or NULL if the branch is really a
25247 conditional return.
25249 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
25250 condition code register and its mode specifies what kind of
25251 comparison we made.
25253 REVERSED is nonzero if we should reverse the sense of the comparison.
25255 INSN is the insn. */
25257 char *
25258 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
25260 static char string[64];
25261 enum rtx_code code = GET_CODE (op);
25262 rtx cc_reg = XEXP (op, 0);
25263 machine_mode mode = GET_MODE (cc_reg);
25264 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
25265 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
25266 int really_reversed = reversed ^ need_longbranch;
25267 char *s = string;
25268 const char *ccode;
25269 const char *pred;
25270 rtx note;
25272 validate_condition_mode (code, mode);
25274 /* Work out which way this really branches. We could always use
25275 reverse_condition_maybe_unordered here, but distinguishing the
25276 cases makes the resulting assembler clearer. */
25277 if (really_reversed)
25279 /* Reversal of FP compares takes care -- an ordered compare
25280 becomes an unordered compare and vice versa. */
25281 if (mode == CCFPmode)
25282 code = reverse_condition_maybe_unordered (code);
25283 else
25284 code = reverse_condition (code);
25287 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
25289 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
25290 to the GT bit. */
25291 switch (code)
25293 case EQ:
25294 /* Opposite of GT. */
25295 code = GT;
25296 break;
25298 case NE:
25299 code = UNLE;
25300 break;
25302 default:
25303 gcc_unreachable ();
25307 switch (code)
25309 /* Not all of these are actually distinct opcodes, but
25310 we distinguish them for clarity of the resulting assembler. */
25311 case NE: case LTGT:
25312 ccode = "ne"; break;
25313 case EQ: case UNEQ:
25314 ccode = "eq"; break;
25315 case GE: case GEU:
25316 ccode = "ge"; break;
25317 case GT: case GTU: case UNGT:
25318 ccode = "gt"; break;
25319 case LE: case LEU:
25320 ccode = "le"; break;
25321 case LT: case LTU: case UNLT:
25322 ccode = "lt"; break;
25323 case UNORDERED: ccode = "un"; break;
25324 case ORDERED: ccode = "nu"; break;
25325 case UNGE: ccode = "nl"; break;
25326 case UNLE: ccode = "ng"; break;
25327 default:
25328 gcc_unreachable ();
25331 /* Maybe we have a guess as to how likely the branch is. */
25332 pred = "";
25333 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
25334 if (note != NULL_RTX)
25336 /* PROB is the difference from 50%. */
25337 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
25338 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
25340 /* Only hint for highly probable/improbable branches on newer cpus when
25341 we have real profile data, as static prediction overrides processor
25342 dynamic prediction. For older cpus we may as well always hint, but
25343 assume not taken for branches that are very close to 50% as a
25344 mispredicted taken branch is more expensive than a
25345 mispredicted not-taken branch. */
25346 if (rs6000_always_hint
25347 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
25348 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
25349 && br_prob_note_reliable_p (note)))
25351 if (abs (prob) > REG_BR_PROB_BASE / 20
25352 && ((prob > 0) ^ need_longbranch))
25353 pred = "+";
25354 else
25355 pred = "-";
25359 if (label == NULL)
25360 s += sprintf (s, "b%slr%s ", ccode, pred);
25361 else
25362 s += sprintf (s, "b%s%s ", ccode, pred);
25364 /* We need to escape any '%' characters in the reg_names string.
25365 Assume they'd only be the first character.... */
25366 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
25367 *s++ = '%';
25368 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
25370 if (label != NULL)
25372 /* If the branch distance was too far, we may have to use an
25373 unconditional branch to go the distance. */
25374 if (need_longbranch)
25375 s += sprintf (s, ",$+8\n\tb %s", label);
25376 else
25377 s += sprintf (s, ",%s", label);
25380 return string;
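/* Illustrative outputs (assuming the default reg_names, where cr0
   prints as "0"):

     short branch, EQ on cr0, predicted taken:   beq+ 0,.L5
     conditional return (LABEL == NULL):         beqlr 0
     out-of-range branch (need_longbranch):      bne 0,$+8
                                                 b .L5

   In the last case the condition was reversed above so that the
   unconditional "b" covers the long distance.  */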
25383 /* Return the string to flip the GT bit on a CR. */
25384 char *
25385 output_e500_flip_gt_bit (rtx dst, rtx src)
25387 static char string[64];
25388 int a, b;
25390 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
25391 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
25393 /* GT bit. */
25394 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
25395 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
25397 sprintf (string, "crnot %d,%d", a, b);
25398 return string;
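/* Worked example: each CR field holds four bits (LT, GT, EQ, SO), so
   the GT bit of field N is CR bit 4*N + 1.  Flipping the GT bit of
   cr3 in place therefore prints as "crnot 13,13".  */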
25401 /* Return insn for VSX or Altivec comparisons. */
25403 static rtx
25404 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
25406 rtx mask;
25407 machine_mode mode = GET_MODE (op0);
25409 switch (code)
25411 default:
25412 break;
25414 case GE:
25415 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
25416 return NULL_RTX;
25417 /* FALLTHRU */
25419 case EQ:
25420 case GT:
25421 case GTU:
25422 case ORDERED:
25423 case UNORDERED:
25424 case UNEQ:
25425 case LTGT:
25426 mask = gen_reg_rtx (mode);
25427 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
25428 return mask;
25431 return NULL_RTX;
25434 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
25435 DMODE is expected destination mode. This is a recursive function. */
25437 static rtx
25438 rs6000_emit_vector_compare (enum rtx_code rcode,
25439 rtx op0, rtx op1,
25440 machine_mode dmode)
25442 rtx mask;
25443 bool swap_operands = false;
25444 bool try_again = false;
25446 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
25447 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
25449 /* See if the comparison works as is. */
25450 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
25451 if (mask)
25452 return mask;
25454 switch (rcode)
25456 case LT:
25457 rcode = GT;
25458 swap_operands = true;
25459 try_again = true;
25460 break;
25461 case LTU:
25462 rcode = GTU;
25463 swap_operands = true;
25464 try_again = true;
25465 break;
25466 case NE:
25467 case UNLE:
25468 case UNLT:
25469 case UNGE:
25470 case UNGT:
25471 /* Invert condition and try again.
25472 e.g., A != B becomes ~(A==B). */
25474 enum rtx_code rev_code;
25475 enum insn_code nor_code;
25476 rtx mask2;
25478 rev_code = reverse_condition_maybe_unordered (rcode);
25479 if (rev_code == UNKNOWN)
25480 return NULL_RTX;
25482 nor_code = optab_handler (one_cmpl_optab, dmode);
25483 if (nor_code == CODE_FOR_nothing)
25484 return NULL_RTX;
25486 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
25487 if (!mask2)
25488 return NULL_RTX;
25490 mask = gen_reg_rtx (dmode);
25491 emit_insn (GEN_FCN (nor_code) (mask, mask2));
25492 return mask;
25494 break;
25495 case GE:
25496 case GEU:
25497 case LE:
25498 case LEU:
25499 /* Try GT/GTU/LT/LTU OR EQ */
25501 rtx c_rtx, eq_rtx;
25502 enum insn_code ior_code;
25503 enum rtx_code new_code;
25505 switch (rcode)
25507 case GE:
25508 new_code = GT;
25509 break;
25511 case GEU:
25512 new_code = GTU;
25513 break;
25515 case LE:
25516 new_code = LT;
25517 break;
25519 case LEU:
25520 new_code = LTU;
25521 break;
25523 default:
25524 gcc_unreachable ();
25527 ior_code = optab_handler (ior_optab, dmode);
25528 if (ior_code == CODE_FOR_nothing)
25529 return NULL_RTX;
25531 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
25532 if (!c_rtx)
25533 return NULL_RTX;
25535 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
25536 if (!eq_rtx)
25537 return NULL_RTX;
25539 mask = gen_reg_rtx (dmode);
25540 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
25541 return mask;
25543 break;
25544 default:
25545 return NULL_RTX;
25548 if (try_again)
25550 if (swap_operands)
25551 std::swap (op0, op1);
25553 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
25554 if (mask)
25555 return mask;
25558 /* You only get two chances. */
25559 return NULL_RTX;
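/* Illustrative decompositions performed by the recursion above:

     a <  b   becomes   b GT a               (operands swapped)
     a <= b   becomes   (a LT b) IOR (a EQ b)
     a != b   becomes   NOT (a EQ b)

   so only EQ/GT/GTU (plus the FP GE and unordered tests) need to be
   supported directly by the hardware.  */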
25562 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
25563 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
25564 operands for the relation operation COND. */
25566 int
25567 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
25568 rtx cond, rtx cc_op0, rtx cc_op1)
25570 machine_mode dest_mode = GET_MODE (dest);
25571 machine_mode mask_mode = GET_MODE (cc_op0);
25572 enum rtx_code rcode = GET_CODE (cond);
25573 machine_mode cc_mode = CCmode;
25574 rtx mask;
25575 rtx cond2;
25576 bool invert_move = false;
25578 if (VECTOR_UNIT_NONE_P (dest_mode))
25579 return 0;
25581 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
25582 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
25584 switch (rcode)
25586 /* Swap operands if we can, and fall back to doing the operation as
25587 specified, and doing a NOR to invert the test. */
25588 case NE:
25589 case UNLE:
25590 case UNLT:
25591 case UNGE:
25592 case UNGT:
25593 /* Invert condition and try again.
25594 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
25595 invert_move = true;
25596 rcode = reverse_condition_maybe_unordered (rcode);
25597 if (rcode == UNKNOWN)
25598 return 0;
25599 break;
25601 case GE:
25602 case LE:
25603 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
25605 /* Invert condition to avoid compound test. */
25606 invert_move = true;
25607 rcode = reverse_condition (rcode);
25609 break;
25611 case GTU:
25612 case GEU:
25613 case LTU:
25614 case LEU:
25615 /* Mark unsigned tests with CCUNSmode. */
25616 cc_mode = CCUNSmode;
25618 /* Invert condition to avoid compound test if necessary. */
25619 if (rcode == GEU || rcode == LEU)
25621 invert_move = true;
25622 rcode = reverse_condition (rcode);
25624 break;
25626 default:
25627 break;
25630 /* Get the vector mask for the given relational operations. */
25631 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
25633 if (!mask)
25634 return 0;
25636 if (invert_move)
25637 std::swap (op_true, op_false);
25639 /* Optimize using the fact that the comparison mask is -1/0 per element. */
25640 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
25641 && (GET_CODE (op_true) == CONST_VECTOR
25642 || GET_CODE (op_false) == CONST_VECTOR))
25644 rtx constant_0 = CONST0_RTX (dest_mode);
25645 rtx constant_m1 = CONSTM1_RTX (dest_mode);
25647 if (op_true == constant_m1 && op_false == constant_0)
25649 emit_move_insn (dest, mask);
25650 return 1;
25653 else if (op_true == constant_0 && op_false == constant_m1)
25655 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
25656 return 1;
25659 /* If we can't use the vector comparison directly, perhaps we can use
25660 the mask for the true or false fields, instead of loading up a
25661 constant. */
25662 if (op_true == constant_m1)
25663 op_true = mask;
25665 if (op_false == constant_0)
25666 op_false = mask;
25669 if (!REG_P (op_true) && !SUBREG_P (op_true))
25670 op_true = force_reg (dest_mode, op_true);
25672 if (!REG_P (op_false) && !SUBREG_P (op_false))
25673 op_false = force_reg (dest_mode, op_false);
25675 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
25676 CONST0_RTX (dest_mode));
25677 emit_insn (gen_rtx_SET (dest,
25678 gen_rtx_IF_THEN_ELSE (dest_mode,
25679 cond2,
25680 op_true,
25681 op_false)));
25682 return 1;
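/* For example, since an integer vector compare already yields -1/0 in
   each element, "a == b ? -1 : 0" reduces to the mask itself and
   "a == b ? 0 : -1" to a single NOT of the mask, with no select
   instruction needed at all.  */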
25685 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
25686 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
25687 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
25688 hardware has no such operation. */
25690 static int
25691 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25693 enum rtx_code code = GET_CODE (op);
25694 rtx op0 = XEXP (op, 0);
25695 rtx op1 = XEXP (op, 1);
25696 machine_mode compare_mode = GET_MODE (op0);
25697 machine_mode result_mode = GET_MODE (dest);
25698 bool max_p = false;
25700 if (result_mode != compare_mode)
25701 return 0;
25703 if (code == GE || code == GT)
25704 max_p = true;
25705 else if (code == LE || code == LT)
25706 max_p = false;
25707 else
25708 return 0;
25710 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
25711 ;
25713 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
25714 max_p = !max_p;
25716 else
25717 return 0;
25719 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
25720 return 1;
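/* For example, "x = (a >= b) ? a : b" matches the first arm above with
   MAX_P set and is emitted as a single SMAX, i.e. the ISA 3.0
   xsmaxcdp instruction (xsmincdp for the min-shaped cases).  */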
25723 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
25724 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
25725 operands of the last comparison is nonzero/true, FALSE_COND if it is
25726 zero/false. Return 0 if the hardware has no such operation. */
25728 static int
25729 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25731 enum rtx_code code = GET_CODE (op);
25732 rtx op0 = XEXP (op, 0);
25733 rtx op1 = XEXP (op, 1);
25734 machine_mode result_mode = GET_MODE (dest);
25735 rtx compare_rtx;
25736 rtx cmove_rtx;
25737 rtx clobber_rtx;
25739 if (!can_create_pseudo_p ())
25740 return 0;
25742 switch (code)
25744 case EQ:
25745 case GE:
25746 case GT:
25747 break;
25749 case NE:
25750 case LT:
25751 case LE:
25752 code = swap_condition (code);
25753 std::swap (op0, op1);
25754 break;
25756 default:
25757 return 0;
25760 /* Generate: [(parallel [(set (dest)
25761 (if_then_else (op (cmp1) (cmp2))
25762 (true)
25763 (false)))
25764 (clobber (scratch))])]. */
25766 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
25767 cmove_rtx = gen_rtx_SET (dest,
25768 gen_rtx_IF_THEN_ELSE (result_mode,
25769 compare_rtx,
25770 true_cond,
25771 false_cond));
25773 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
25774 emit_insn (gen_rtx_PARALLEL (VOIDmode,
25775 gen_rtvec (2, cmove_rtx, clobber_rtx)));
25777 return 1;
25780 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
25781 operands of the last comparison is nonzero/true, FALSE_COND if it
25782 is zero/false. Return 0 if the hardware has no such operation. */
25784 int
25785 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25787 enum rtx_code code = GET_CODE (op);
25788 rtx op0 = XEXP (op, 0);
25789 rtx op1 = XEXP (op, 1);
25790 machine_mode compare_mode = GET_MODE (op0);
25791 machine_mode result_mode = GET_MODE (dest);
25792 rtx temp;
25793 bool is_against_zero;
25795 /* These modes should always match. */
25796 if (GET_MODE (op1) != compare_mode
25797 /* In the isel case however, we can use a compare immediate, so
25798 op1 may be a small constant. */
25799 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
25800 return 0;
25801 if (GET_MODE (true_cond) != result_mode)
25802 return 0;
25803 if (GET_MODE (false_cond) != result_mode)
25804 return 0;
25806 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
25807 if (TARGET_P9_MINMAX
25808 && (compare_mode == SFmode || compare_mode == DFmode)
25809 && (result_mode == SFmode || result_mode == DFmode))
25811 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
25812 return 1;
25814 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
25815 return 1;
25818 /* Don't allow using floating point comparisons for integer results for
25819 now. */
25820 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
25821 return 0;
25823 /* First, work out if the hardware can do this at all, or
25824 if it's too slow.... */
25825 if (!FLOAT_MODE_P (compare_mode))
25827 if (TARGET_ISEL)
25828 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
25829 return 0;
25831 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
25832 && SCALAR_FLOAT_MODE_P (compare_mode))
25833 return 0;
25835 is_against_zero = op1 == CONST0_RTX (compare_mode);
25837 /* A floating-point subtract might overflow, underflow, or produce
25838 an inexact result, thus changing the floating-point flags, so it
25839 can't be generated if we care about that. It's safe if one side
25840 of the construct is zero, since then no subtract will be
25841 generated. */
25842 if (SCALAR_FLOAT_MODE_P (compare_mode)
25843 && flag_trapping_math && ! is_against_zero)
25844 return 0;
25846 /* Eliminate half of the comparisons by switching operands, this
25847 makes the remaining code simpler. */
25848 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
25849 || code == LTGT || code == LT || code == UNLE)
25851 code = reverse_condition_maybe_unordered (code);
25852 temp = true_cond;
25853 true_cond = false_cond;
25854 false_cond = temp;
25857 /* UNEQ and LTGT take four instructions for a comparison with zero,
25858 so it'll probably be faster to use a branch here too. */
25859 if (code == UNEQ && HONOR_NANS (compare_mode))
25860 return 0;
25862 /* We're going to try to implement comparisons by performing
25863 a subtract, then comparing against zero. Unfortunately,
25864 Inf - Inf is NaN which is not zero, and so if we don't
25865 know that the operand is finite and the comparison
25866 would treat EQ differently from UNORDERED, we can't do it. */
25867 if (HONOR_INFINITIES (compare_mode)
25868 && code != GT && code != UNGE
25869 && (GET_CODE (op1) != CONST_DOUBLE
25870 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
25871 /* Constructs of the form (a OP b ? a : b) are safe. */
25872 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
25873 || (! rtx_equal_p (op0, true_cond)
25874 && ! rtx_equal_p (op1, true_cond))))
25875 return 0;
25877 /* At this point we know we can use fsel. */
25879 /* Reduce the comparison to a comparison against zero. */
25880 if (! is_against_zero)
25882 temp = gen_reg_rtx (compare_mode);
25883 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
25884 op0 = temp;
25885 op1 = CONST0_RTX (compare_mode);
25888 /* If we don't care about NaNs we can reduce some of the comparisons
25889 down to faster ones. */
25890 if (! HONOR_NANS (compare_mode))
25891 switch (code)
25893 case GT:
25894 code = LE;
25895 temp = true_cond;
25896 true_cond = false_cond;
25897 false_cond = temp;
25898 break;
25899 case UNGE:
25900 code = GE;
25901 break;
25902 case UNEQ:
25903 code = EQ;
25904 break;
25905 default:
25906 break;
25909 /* Now, reduce everything down to a GE. */
25910 switch (code)
25912 case GE:
25913 break;
25915 case LE:
25916 temp = gen_reg_rtx (compare_mode);
25917 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25918 op0 = temp;
25919 break;
25921 case ORDERED:
25922 temp = gen_reg_rtx (compare_mode);
25923 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
25924 op0 = temp;
25925 break;
25927 case EQ:
25928 temp = gen_reg_rtx (compare_mode);
25929 emit_insn (gen_rtx_SET (temp,
25930 gen_rtx_NEG (compare_mode,
25931 gen_rtx_ABS (compare_mode, op0))));
25932 op0 = temp;
25933 break;
25935 case UNGE:
25936 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
25937 temp = gen_reg_rtx (result_mode);
25938 emit_insn (gen_rtx_SET (temp,
25939 gen_rtx_IF_THEN_ELSE (result_mode,
25940 gen_rtx_GE (VOIDmode,
25941 op0, op1),
25942 true_cond, false_cond)));
25943 false_cond = true_cond;
25944 true_cond = temp;
25946 temp = gen_reg_rtx (compare_mode);
25947 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25948 op0 = temp;
25949 break;
25951 case GT:
25952 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
25953 temp = gen_reg_rtx (result_mode);
25954 emit_insn (gen_rtx_SET (temp,
25955 gen_rtx_IF_THEN_ELSE (result_mode,
25956 gen_rtx_GE (VOIDmode,
25957 op0, op1),
25958 true_cond, false_cond)));
25959 true_cond = false_cond;
25960 false_cond = temp;
25962 temp = gen_reg_rtx (compare_mode);
25963 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25964 op0 = temp;
25965 break;
25967 default:
25968 gcc_unreachable ();
25971 emit_insn (gen_rtx_SET (dest,
25972 gen_rtx_IF_THEN_ELSE (result_mode,
25973 gen_rtx_GE (VOIDmode,
25974 op0, op1),
25975 true_cond, false_cond)));
25976 return 1;
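/* Worked fsel example (possible only when !flag_trapping_math, per the
   check above): "t = (a >= b) ? c : d" is first reduced to a compare
   against zero via "tmp = a - b", and the final IF_THEN_ELSE then
   corresponds to roughly "fsel t,tmp,c,d", selecting C when TMP is
   greater than or equal to zero.  */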
25979 /* Same as above, but for ints (isel). */
25981 static int
25982 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25984 rtx condition_rtx, cr;
25985 machine_mode mode = GET_MODE (dest);
25986 enum rtx_code cond_code;
25987 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
25988 bool signedp;
25990 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
25991 return 0;
25993 /* We still have to do the compare, because isel doesn't do a
25994 compare; it just looks at the CRx bits set by a previous compare
25995 instruction. */
25996 condition_rtx = rs6000_generate_compare (op, mode);
25997 cond_code = GET_CODE (condition_rtx);
25998 cr = XEXP (condition_rtx, 0);
25999 signedp = GET_MODE (cr) == CCmode;
26001 isel_func = (mode == SImode
26002 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
26003 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
26005 switch (cond_code)
26007 case LT: case GT: case LTU: case GTU: case EQ:
26008 /* isel handles these directly. */
26009 break;
26011 default:
26012 /* We need to swap the sense of the comparison. */
26014 std::swap (false_cond, true_cond);
26015 PUT_CODE (condition_rtx, reverse_condition (cond_code));
26017 break;
26020 false_cond = force_reg (mode, false_cond);
26021 if (true_cond != const0_rtx)
26022 true_cond = force_reg (mode, true_cond);
26024 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
26026 return 1;
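/* For example, "r = (a < b) ? x : y" first emits a cmpw/cmpd to set a
   CR field, forces X and Y into registers (Y always, X unless it is
   zero), and then emits a single isel that selects X when the LT bit
   of that CR field is set and Y otherwise.  */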
26029 const char *
26030 output_isel (rtx *operands)
26032 enum rtx_code code;
26034 code = GET_CODE (operands[1]);
26036 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
26038 gcc_assert (GET_CODE (operands[2]) == REG
26039 && GET_CODE (operands[3]) == REG);
26040 PUT_CODE (operands[1], reverse_condition (code));
26041 return "isel %0,%3,%2,%j1";
26044 return "isel %0,%2,%3,%j1";
26047 void
26048 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
26050 machine_mode mode = GET_MODE (op0);
26051 enum rtx_code c;
26052 rtx target;
26054 /* VSX/altivec have direct min/max insns. */
26055 if ((code == SMAX || code == SMIN)
26056 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
26057 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
26059 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
26060 return;
26063 if (code == SMAX || code == SMIN)
26064 c = GE;
26065 else
26066 c = GEU;
26068 if (code == SMAX || code == UMAX)
26069 target = emit_conditional_move (dest, c, op0, op1, mode,
26070 op0, op1, mode, 0);
26071 else
26072 target = emit_conditional_move (dest, c, op0, op1, mode,
26073 op1, op0, mode, 0);
26074 gcc_assert (target);
26075 if (target != dest)
26076 emit_move_insn (dest, target);
26079 /* Split a signbit operation on 64-bit machines with direct move. Also allow
26080 for the value to come from memory or if it is already loaded into a GPR. */
26082 void
26083 rs6000_split_signbit (rtx dest, rtx src)
26085 machine_mode d_mode = GET_MODE (dest);
26086 machine_mode s_mode = GET_MODE (src);
26087 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
26088 rtx shift_reg = dest_di;
26090 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
26092 if (MEM_P (src))
26094 rtx mem = (WORDS_BIG_ENDIAN
26095 ? adjust_address (src, DImode, 0)
26096 : adjust_address (src, DImode, 8));
26097 emit_insn (gen_rtx_SET (dest_di, mem));
26100 else
26102 unsigned int r = reg_or_subregno (src);
26104 if (INT_REGNO_P (r))
26105 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
26107 else
26109 /* Generate the special mfvsrd instruction to get it in a GPR. */
26110 gcc_assert (VSX_REGNO_P (r));
26111 if (s_mode == KFmode)
26112 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
26113 else
26114 emit_insn (gen_signbittf2_dm2 (dest_di, src));
26118 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
26119 return;
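/* In every path above the IEEE 128-bit value's sign bit ends up as the
   most significant bit of a 64-bit quantity -- the high doubleword of
   the memory operand, the high register of a GPR pair (REGNO + 1 on
   little-endian), or the mfvsrd result -- so the single shift right by
   63 leaves just the sign bit in DEST.  */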
26122 /* A subroutine of the atomic operation splitters. Jump to LABEL if
26123 COND is true. Mark the jump as unlikely to be taken. */
26125 static void
26126 emit_unlikely_jump (rtx cond, rtx label)
26128 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
26129 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
26130 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
26133 /* A subroutine of the atomic operation splitters. Emit a load-locked
26134 instruction in MODE. For QI/HImode, possibly use a pattern that includes
26135 the zero_extend operation. */
26137 static void
26138 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
26140 rtx (*fn) (rtx, rtx) = NULL;
26142 switch (mode)
26144 case QImode:
26145 fn = gen_load_lockedqi;
26146 break;
26147 case HImode:
26148 fn = gen_load_lockedhi;
26149 break;
26150 case SImode:
26151 if (GET_MODE (mem) == QImode)
26152 fn = gen_load_lockedqi_si;
26153 else if (GET_MODE (mem) == HImode)
26154 fn = gen_load_lockedhi_si;
26155 else
26156 fn = gen_load_lockedsi;
26157 break;
26158 case DImode:
26159 fn = gen_load_lockeddi;
26160 break;
26161 case TImode:
26162 fn = gen_load_lockedti;
26163 break;
26164 default:
26165 gcc_unreachable ();
26167 emit_insn (fn (reg, mem));
26170 /* A subroutine of the atomic operation splitters. Emit a store-conditional
26171 instruction in MODE. */
26173 static void
26174 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
26176 rtx (*fn) (rtx, rtx, rtx) = NULL;
26178 switch (mode)
26180 case QImode:
26181 fn = gen_store_conditionalqi;
26182 break;
26183 case HImode:
26184 fn = gen_store_conditionalhi;
26185 break;
26186 case SImode:
26187 fn = gen_store_conditionalsi;
26188 break;
26189 case DImode:
26190 fn = gen_store_conditionaldi;
26191 break;
26192 case TImode:
26193 fn = gen_store_conditionalti;
26194 break;
26195 default:
26196 gcc_unreachable ();
26199 /* Emit sync before stwcx. to address PPC405 Erratum. */
26200 if (PPC405_ERRATUM77)
26201 emit_insn (gen_hwsync ());
26203 emit_insn (fn (res, mem, val));
26206 /* Expand barriers before and after a load_locked/store_cond sequence. */
26208 static rtx
26209 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
26211 rtx addr = XEXP (mem, 0);
26212 int strict_p = (reload_in_progress || reload_completed);
26214 if (!legitimate_indirect_address_p (addr, strict_p)
26215 && !legitimate_indexed_address_p (addr, strict_p))
26217 addr = force_reg (Pmode, addr);
26218 mem = replace_equiv_address_nv (mem, addr);
26221 switch (model)
26223 case MEMMODEL_RELAXED:
26224 case MEMMODEL_CONSUME:
26225 case MEMMODEL_ACQUIRE:
26226 break;
26227 case MEMMODEL_RELEASE:
26228 case MEMMODEL_ACQ_REL:
26229 emit_insn (gen_lwsync ());
26230 break;
26231 case MEMMODEL_SEQ_CST:
26232 emit_insn (gen_hwsync ());
26233 break;
26234 default:
26235 gcc_unreachable ();
26237 return mem;
26240 static void
26241 rs6000_post_atomic_barrier (enum memmodel model)
26243 switch (model)
26245 case MEMMODEL_RELAXED:
26246 case MEMMODEL_CONSUME:
26247 case MEMMODEL_RELEASE:
26248 break;
26249 case MEMMODEL_ACQUIRE:
26250 case MEMMODEL_ACQ_REL:
26251 case MEMMODEL_SEQ_CST:
26252 emit_insn (gen_isync ());
26253 break;
26254 default:
26255 gcc_unreachable ();
26259 /* A subroutine of the various atomic expanders. For sub-word operations,
26260 we must adjust things to operate on SImode. Given the original MEM,
26261 return a new aligned memory. Also build and return the quantities by
26262 which to shift and mask. */
26264 static rtx
26265 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
26267 rtx addr, align, shift, mask, mem;
26268 HOST_WIDE_INT shift_mask;
26269 machine_mode mode = GET_MODE (orig_mem);
26271 /* For smaller modes, we have to implement this via SImode. */
26272 shift_mask = (mode == QImode ? 0x18 : 0x10);
26274 addr = XEXP (orig_mem, 0);
26275 addr = force_reg (GET_MODE (addr), addr);
26277 /* Aligned memory containing subword. Generate a new memory. We
26278 do not want any of the existing MEM_ATTR data, as we're now
26279 accessing memory outside the original object. */
26280 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
26281 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26282 mem = gen_rtx_MEM (SImode, align);
26283 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
26284 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
26285 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
26287 /* Shift amount for subword relative to aligned word. */
26288 shift = gen_reg_rtx (SImode);
26289 addr = gen_lowpart (SImode, addr);
26290 rtx tmp = gen_reg_rtx (SImode);
26291 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
26292 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
26293 if (BYTES_BIG_ENDIAN)
26294 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
26295 shift, 1, OPTAB_LIB_WIDEN);
26296 *pshift = shift;
26298 /* Mask for insertion. */
26299 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
26300 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
26301 *pmask = mask;
26303 return mem;
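/* Worked example: for a QImode access at address 0x1003 on a
   little-endian target, ALIGN is 0x1000, SHIFT is
   (0x1003 << 3) & 0x18 = 24, and MASK is 0xff << 24.  On big-endian
   the XOR with 0x18 turns that 24 into 0, matching the fact that byte
   3 is then the least significant byte of the aligned word.  */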
26306 /* A subroutine of the various atomic expanders. For sub-word operands,
26307 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
26309 static rtx
26310 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
26312 rtx x;
26314 x = gen_reg_rtx (SImode);
26315 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
26316 gen_rtx_NOT (SImode, mask),
26317 oldval)));
26319 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
26321 return x;
26324 /* A subroutine of the various atomic expanders. For sub-word operands,
26325 extract WIDE to NARROW via SHIFT. */
26327 static void
26328 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
26330 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
26331 wide, 1, OPTAB_LIB_WIDEN);
26332 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
26335 /* Expand an atomic compare and swap operation. */
26337 void
26338 rs6000_expand_atomic_compare_and_swap (rtx operands[])
26340 rtx boolval, retval, mem, oldval, newval, cond;
26341 rtx label1, label2, x, mask, shift;
26342 machine_mode mode, orig_mode;
26343 enum memmodel mod_s, mod_f;
26344 bool is_weak;
26346 boolval = operands[0];
26347 retval = operands[1];
26348 mem = operands[2];
26349 oldval = operands[3];
26350 newval = operands[4];
26351 is_weak = (INTVAL (operands[5]) != 0);
26352 mod_s = memmodel_base (INTVAL (operands[6]));
26353 mod_f = memmodel_base (INTVAL (operands[7]));
26354 orig_mode = mode = GET_MODE (mem);
26356 mask = shift = NULL_RTX;
26357 if (mode == QImode || mode == HImode)
26359 /* Before power8, we didn't have access to lbarx/lharx, so generate a
26360 lwarx and use shift/mask operations. With power8, we need to do the
26361 comparison in SImode, but the store is still done in QI/HImode. */
26362 oldval = convert_modes (SImode, mode, oldval, 1);
26364 if (!TARGET_SYNC_HI_QI)
26366 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26368 /* Shift and mask OLDVAL into position within the word. */
26369 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
26370 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26372 /* Shift and mask NEWVAL into position within the word. */
26373 newval = convert_modes (SImode, mode, newval, 1);
26374 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
26375 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26378 /* Prepare to adjust the return value. */
26379 retval = gen_reg_rtx (SImode);
26380 mode = SImode;
26382 else if (reg_overlap_mentioned_p (retval, oldval))
26383 oldval = copy_to_reg (oldval);
26385 if (mode != TImode && !reg_or_short_operand (oldval, mode))
26386 oldval = copy_to_mode_reg (mode, oldval);
26388 if (reg_overlap_mentioned_p (retval, newval))
26389 newval = copy_to_reg (newval);
26391 mem = rs6000_pre_atomic_barrier (mem, mod_s);
26393 label1 = NULL_RTX;
26394 if (!is_weak)
26396 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26397 emit_label (XEXP (label1, 0));
26399 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26401 emit_load_locked (mode, retval, mem);
26403 x = retval;
26404 if (mask)
26405 x = expand_simple_binop (SImode, AND, retval, mask,
26406 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26408 cond = gen_reg_rtx (CCmode);
26409 /* If we have TImode, synthesize a comparison. */
26410 if (mode != TImode)
26411 x = gen_rtx_COMPARE (CCmode, x, oldval);
26412 else
26414 rtx xor1_result = gen_reg_rtx (DImode);
26415 rtx xor2_result = gen_reg_rtx (DImode);
26416 rtx or_result = gen_reg_rtx (DImode);
26417 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
26418 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
26419 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
26420 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
26422 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
26423 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
26424 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
26425 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
26428 emit_insn (gen_rtx_SET (cond, x));
26430 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26431 emit_unlikely_jump (x, label2);
26433 x = newval;
26434 if (mask)
26435 x = rs6000_mask_atomic_subword (retval, newval, mask);
26437 emit_store_conditional (orig_mode, cond, mem, x);
26439 if (!is_weak)
26441 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26442 emit_unlikely_jump (x, label1);
26445 if (!is_mm_relaxed (mod_f))
26446 emit_label (XEXP (label2, 0));
26448 rs6000_post_atomic_barrier (mod_s);
26450 if (is_mm_relaxed (mod_f))
26451 emit_label (XEXP (label2, 0));
26453 if (shift)
26454 rs6000_finish_atomic_subword (operands[1], retval, shift);
26455 else if (mode != GET_MODE (operands[1]))
26456 convert_move (operands[1], retval, 1);
26458 /* In all cases, CR0 contains EQ on success, and NE on failure. */
26459 x = gen_rtx_EQ (SImode, cond, const0_rtx);
26460 emit_insn (gen_rtx_SET (boolval, x));
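/* For a strong SImode compare-and-swap the code above boils down to
   the classic sequence (memory barriers elided):

     .L1: lwarx  r,0,mem
          cmpw   cr0,r,oldval
          bne-   cr0,.L2
          stwcx. newval,0,mem
          bne-   cr0,.L1
     .L2:

   leaving CR0 holding EQ on success and NE on failure, which the
   final SET of BOOLVAL records.  */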
26463 /* Expand an atomic exchange operation. */
26465 void
26466 rs6000_expand_atomic_exchange (rtx operands[])
26468 rtx retval, mem, val, cond;
26469 machine_mode mode;
26470 enum memmodel model;
26471 rtx label, x, mask, shift;
26473 retval = operands[0];
26474 mem = operands[1];
26475 val = operands[2];
26476 model = memmodel_base (INTVAL (operands[3]));
26477 mode = GET_MODE (mem);
26479 mask = shift = NULL_RTX;
26480 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
26482 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26484 /* Shift and mask VAL into position within the word. */
26485 val = convert_modes (SImode, mode, val, 1);
26486 val = expand_simple_binop (SImode, ASHIFT, val, shift,
26487 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26489 /* Prepare to adjust the return value. */
26490 retval = gen_reg_rtx (SImode);
26491 mode = SImode;
26494 mem = rs6000_pre_atomic_barrier (mem, model);
26496 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
26497 emit_label (XEXP (label, 0));
26499 emit_load_locked (mode, retval, mem);
26501 x = val;
26502 if (mask)
26503 x = rs6000_mask_atomic_subword (retval, val, mask);
26505 cond = gen_reg_rtx (CCmode);
26506 emit_store_conditional (mode, cond, mem, x);
26508 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26509 emit_unlikely_jump (x, label);
26511 rs6000_post_atomic_barrier (model);
26513 if (shift)
26514 rs6000_finish_atomic_subword (operands[0], retval, shift);
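/* A full-word SImode exchange therefore reduces to (barriers elided):

     .L1: lwarx  r,0,mem
          stwcx. val,0,mem
          bne-   .L1

   with the previous memory contents left in RETVAL; the subword path
   additionally shifts the loaded word down to extract the QImode or
   HImode result.  */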
26517 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
26518 to perform. MEM is the memory on which to operate. VAL is the second
26519 operand of the binary operator. BEFORE and AFTER are optional locations to
26520 return the value of MEM either before or after the operation. MODEL_RTX
26521 is a CONST_INT containing the memory model to use. */
26523 void
26524 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
26525 rtx orig_before, rtx orig_after, rtx model_rtx)
26527 enum memmodel model = memmodel_base (INTVAL (model_rtx));
26528 machine_mode mode = GET_MODE (mem);
26529 machine_mode store_mode = mode;
26530 rtx label, x, cond, mask, shift;
26531 rtx before = orig_before, after = orig_after;
26533 mask = shift = NULL_RTX;
26534 /* On power8, we want to use SImode for the operation. On previous systems,
26535 do the operation on a full SImode word and shift/mask to get the proper
26536 byte or halfword. */
26537 if (mode == QImode || mode == HImode)
26539 if (TARGET_SYNC_HI_QI)
26541 val = convert_modes (SImode, mode, val, 1);
26543 /* Prepare to adjust the return value. */
26544 before = gen_reg_rtx (SImode);
26545 if (after)
26546 after = gen_reg_rtx (SImode);
26547 mode = SImode;
26549 else
26551 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
26553 /* Shift and mask VAL into position within the word. */
26554 val = convert_modes (SImode, mode, val, 1);
26555 val = expand_simple_binop (SImode, ASHIFT, val, shift,
26556 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26558 switch (code)
26560 case IOR:
26561 case XOR:
26562 /* We've already zero-extended VAL. That is sufficient to
26563 make certain that it does not affect other bits. */
26564 mask = NULL;
26565 break;
26567 case AND:
26568 /* If we make certain that all of the other bits in VAL are
26569 set, that will be sufficient to not affect other bits. */
26570 x = gen_rtx_NOT (SImode, mask);
26571 x = gen_rtx_IOR (SImode, x, val);
26572 emit_insn (gen_rtx_SET (val, x));
26573 mask = NULL;
26574 break;
26576 case NOT:
26577 case PLUS:
26578 case MINUS:
26579 /* These will all affect bits outside the field and need
26580 adjustment via MASK within the loop. */
26581 break;
26583 default:
26584 gcc_unreachable ();
26587 /* Prepare to adjust the return value. */
26588 before = gen_reg_rtx (SImode);
26589 if (after)
26590 after = gen_reg_rtx (SImode);
26591 store_mode = mode = SImode;
26595 mem = rs6000_pre_atomic_barrier (mem, model);
26597 label = gen_label_rtx ();
26598 emit_label (label);
26599 label = gen_rtx_LABEL_REF (VOIDmode, label);
26601 if (before == NULL_RTX)
26602 before = gen_reg_rtx (mode);
26604 emit_load_locked (mode, before, mem);
26606 if (code == NOT)
26608 x = expand_simple_binop (mode, AND, before, val,
26609 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26610 after = expand_simple_unop (mode, NOT, x, after, 1);
26612 else
26614 after = expand_simple_binop (mode, code, before, val,
26615 after, 1, OPTAB_LIB_WIDEN);
26618 x = after;
26619 if (mask)
26621 x = expand_simple_binop (SImode, AND, after, mask,
26622 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26623 x = rs6000_mask_atomic_subword (before, x, mask);
26625 else if (store_mode != mode)
26626 x = convert_modes (store_mode, mode, x, 1);
26628 cond = gen_reg_rtx (CCmode);
26629 emit_store_conditional (store_mode, cond, mem, x);
26631 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
26632 emit_unlikely_jump (x, label);
26634 rs6000_post_atomic_barrier (model);
26636 if (shift)
26638 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
26639 then do the calculations in an SImode register. */
26640 if (orig_before)
26641 rs6000_finish_atomic_subword (orig_before, before, shift);
26642 if (orig_after)
26643 rs6000_finish_atomic_subword (orig_after, after, shift);
26645 else if (store_mode != mode)
26647 /* QImode/HImode on machines with lbarx/lharx where we do the native
26648 operation and then do the calculations in an SImode register. */
26649 if (orig_before)
26650 convert_move (orig_before, before, 1);
26651 if (orig_after)
26652 convert_move (orig_after, after, 1);
26654 else if (orig_after && after != orig_after)
26655 emit_move_insn (orig_after, after);
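/* For example, an SImode __atomic_fetch_add expands to roughly
   (barriers elided):

     .L1: lwarx  before,0,mem
          add    after,before,val
          stwcx. after,0,mem
          bne-   .L1

   For a subword AND, the code above pre-sets every bit of VAL outside
   the field, so performing the operation on the containing word
   cannot disturb the neighbouring bytes.  */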
26658 /* Emit instructions to move SRC to DST. Called by splitters for
26659 multi-register moves. It will emit at most one instruction for
26660 each register that is accessed; that is, it won't emit li/lis pairs
26661 (or equivalent for 64-bit code). One of SRC or DST must be a hard
26662 register. */
26664 void
26665 rs6000_split_multireg_move (rtx dst, rtx src)
26667 /* The register number of the first register being moved. */
26668 int reg;
26669 /* The mode that is to be moved. */
26670 machine_mode mode;
26671 /* The mode that the move is being done in, and its size. */
26672 machine_mode reg_mode;
26673 int reg_mode_size;
26674 /* The number of registers that will be moved. */
26675 int nregs;
26677 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
26678 mode = GET_MODE (dst);
26679 nregs = hard_regno_nregs[reg][mode];
26680 if (FP_REGNO_P (reg))
26681 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
26682 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
26683 else if (ALTIVEC_REGNO_P (reg))
26684 reg_mode = V16QImode;
26685 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
26686 reg_mode = DFmode;
26687 else
26688 reg_mode = word_mode;
26689 reg_mode_size = GET_MODE_SIZE (reg_mode);
26691 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
26693 /* TDmode residing in FP registers is special, since the ISA requires that
26694 the lower-numbered word of a register pair is always the most significant
26695 word, even in little-endian mode. This does not match the usual subreg
26696 semantics, so we cannot use simplify_gen_subreg in those cases. Access
26697 the appropriate constituent registers "by hand" in little-endian mode.
26699 Note we do not need to check for destructive overlap here since TDmode
26700 can only reside in even/odd register pairs. */
26701 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
26703 rtx p_src, p_dst;
26704 int i;
26706 for (i = 0; i < nregs; i++)
26708 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
26709 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
26710 else
26711 p_src = simplify_gen_subreg (reg_mode, src, mode,
26712 i * reg_mode_size);
26714 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
26715 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
26716 else
26717 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
26718 i * reg_mode_size);
26720 emit_insn (gen_rtx_SET (p_dst, p_src));
26723 return;
26726 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
26728 /* Move register range backwards, if we might have destructive
26729 overlap. */
26730 int i;
26731 for (i = nregs - 1; i >= 0; i--)
26732 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
26733 i * reg_mode_size),
26734 simplify_gen_subreg (reg_mode, src, mode,
26735 i * reg_mode_size)));
26737 else
26739 int i;
26740 int j = -1;
26741 bool used_update = false;
26742 rtx restore_basereg = NULL_RTX;
26744 if (MEM_P (src) && INT_REGNO_P (reg))
26746 rtx breg;
26748 if (GET_CODE (XEXP (src, 0)) == PRE_INC
26749 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
26751 rtx delta_rtx;
26752 breg = XEXP (XEXP (src, 0), 0);
26753 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
26754 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
26755 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
26756 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
26757 src = replace_equiv_address (src, breg);
26759 else if (! rs6000_offsettable_memref_p (src, reg_mode))
26761 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
26763 rtx basereg = XEXP (XEXP (src, 0), 0);
26764 if (TARGET_UPDATE)
26766 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
26767 emit_insn (gen_rtx_SET (ndst,
26768 gen_rtx_MEM (reg_mode,
26769 XEXP (src, 0))));
26770 used_update = true;
26772 else
26773 emit_insn (gen_rtx_SET (basereg,
26774 XEXP (XEXP (src, 0), 1)));
26775 src = replace_equiv_address (src, basereg);
26777 else
26779 rtx basereg = gen_rtx_REG (Pmode, reg);
26780 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
26781 src = replace_equiv_address (src, basereg);
26785 breg = XEXP (src, 0);
26786 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
26787 breg = XEXP (breg, 0);
26789 /* If the base register we are using to address memory is
26790 also a destination reg, then change that register last. */
26791 if (REG_P (breg)
26792 && REGNO (breg) >= REGNO (dst)
26793 && REGNO (breg) < REGNO (dst) + nregs)
26794 j = REGNO (breg) - REGNO (dst);
26796 else if (MEM_P (dst) && INT_REGNO_P (reg))
26798 rtx breg;
26800 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
26801 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
26803 rtx delta_rtx;
26804 breg = XEXP (XEXP (dst, 0), 0);
26805 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
26806 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
26807 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
26809 /* We have to update the breg before doing the store.
26810 Use store with update, if available. */
26812 if (TARGET_UPDATE)
26814 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26815 emit_insn (TARGET_32BIT
26816 ? (TARGET_POWERPC64
26817 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
26818 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
26819 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
26820 used_update = true;
26822 else
26823 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
26824 dst = replace_equiv_address (dst, breg);
26826 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
26827 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
26829 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
26831 rtx basereg = XEXP (XEXP (dst, 0), 0);
26832 if (TARGET_UPDATE)
26834 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26835 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
26836 XEXP (dst, 0)),
26837 nsrc));
26838 used_update = true;
26840 else
26841 emit_insn (gen_rtx_SET (basereg,
26842 XEXP (XEXP (dst, 0), 1)));
26843 dst = replace_equiv_address (dst, basereg);
26845 else
26847 rtx basereg = XEXP (XEXP (dst, 0), 0);
26848 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
26849 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
26850 && REG_P (basereg)
26851 && REG_P (offsetreg)
26852 && REGNO (basereg) != REGNO (offsetreg));
26853 if (REGNO (basereg) == 0)
26855 rtx tmp = offsetreg;
26856 offsetreg = basereg;
26857 basereg = tmp;
26859 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
26860 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
26861 dst = replace_equiv_address (dst, basereg);
26864 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
26865 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
26868 for (i = 0; i < nregs; i++)
26870 /* Calculate index to next subword. */
26871 ++j;
26872 if (j == nregs)
26873 j = 0;
26875 /* If the compiler already emitted the move of the first word by a
26876 store with update, there is no need to do anything. */
26877 if (j == 0 && used_update)
26878 continue;
26880 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
26881 j * reg_mode_size),
26882 simplify_gen_subreg (reg_mode, src, mode,
26883 j * reg_mode_size)));
26885 if (restore_basereg != NULL_RTX)
26886 emit_insn (restore_basereg);
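/* For instance, an 8-byte GPR-to-memory move on a 32-bit target splits
   into two word-sized stores.  If the source and destination register
   ranges overlap (REGNO (src) < REGNO (dst)), the subwords are moved
   last-to-first; and if the address's base register is itself one of
   the destination registers, the J rotation above makes sure that
   register's subword is loaded last.  */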
26891 /* This page contains routines that are used to determine what the
26892 function prologue and epilogue code will do and write them out. */
26894 static inline bool
26895 save_reg_p (int r)
26897 return !call_used_regs[r] && df_regs_ever_live_p (r);
26900 /* Determine whether the gp REG is really used. */
26902 static bool
26903 rs6000_reg_live_or_pic_offset_p (int reg)
26905 /* We need to mark the PIC offset register live under the same conditions
26906 as it is set up, since otherwise it won't be saved before we clobber it. */
26908 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
26910 if (TARGET_TOC && TARGET_MINIMAL_TOC
26911 && (crtl->calls_eh_return
26912 || df_regs_ever_live_p (reg)
26913 || !constant_pool_empty_p ()))
26914 return true;
26916 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
26917 && flag_pic)
26918 return true;
26921 /* If the function calls eh_return, claim that all the registers that
26922 would otherwise be checked for liveness are used. */
26924 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
26925 && !call_used_regs[reg]);
26928 /* Return the first fixed-point register that is required to be
26929 saved. 32 if none. */
26931 int
26932 first_reg_to_save (void)
26934 int first_reg;
26936 /* Find lowest numbered live register. */
26937 for (first_reg = 13; first_reg <= 31; first_reg++)
26938 if (save_reg_p (first_reg))
26939 break;
26941 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
26942 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
26943 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
26944 || (TARGET_TOC && TARGET_MINIMAL_TOC))
26945 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
26946 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
26948 #if TARGET_MACHO
26949 if (flag_pic
26950 && crtl->uses_pic_offset_table
26951 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
26952 return RS6000_PIC_OFFSET_TABLE_REGNUM;
26953 #endif
26955 return first_reg;
26958 /* Similar, for FP regs. */
26960 int
26961 first_fp_reg_to_save (void)
26963 int first_reg;
26965 /* Find lowest numbered live register. */
26966 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
26967 if (save_reg_p (first_reg))
26968 break;
26970 return first_reg;
26973 /* Similar, for AltiVec regs. */
26975 static int
26976 first_altivec_reg_to_save (void)
26978 int i;
26980 /* Stack frame remains as is unless we are in AltiVec ABI. */
26981 if (! TARGET_ALTIVEC_ABI)
26982 return LAST_ALTIVEC_REGNO + 1;
26984 /* On Darwin, the unwind routines are compiled without
26985 TARGET_ALTIVEC, and use save_world to save/restore the
26986 altivec registers when necessary. */
26987 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
26988 && ! TARGET_ALTIVEC)
26989 return FIRST_ALTIVEC_REGNO + 20;
26991 /* Find lowest numbered live register. */
26992 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
26993 if (save_reg_p (i))
26994 break;
26996 return i;
26999 /* Return a 32-bit mask of the AltiVec registers we need to set in
27000 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
27001 the 32-bit word is 0. */
27003 static unsigned int
27004 compute_vrsave_mask (void)
27006 unsigned int i, mask = 0;
27008 /* On Darwin, the unwind routines are compiled without
27009 TARGET_ALTIVEC, and use save_world to save/restore the
27010 call-saved altivec registers when necessary. */
27011 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
27012 && ! TARGET_ALTIVEC)
27013 mask |= 0xFFF;
27015 /* First, find out if we use _any_ altivec registers. */
27016 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
27017 if (df_regs_ever_live_p (i))
27018 mask |= ALTIVEC_REG_BIT (i);
27020 if (mask == 0)
27021 return mask;
27023 /* Next, remove the argument registers from the set. These must
27024 be in the VRSAVE mask set by the caller, so we don't need to add
27025 them in again. More importantly, the mask we compute here is
27026 used to generate CLOBBERs in the set_vrsave insn, and we do not
27027 wish the argument registers to die. */
27028 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
27029 mask &= ~ALTIVEC_REG_BIT (i);
27031 /* Similarly, remove the return value from the set. */
27033 bool yes = false;
27034 diddle_return_value (is_altivec_return_reg, &yes);
27035 if (yes)
27036 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
27039 return mask;
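/* For example, a function that clobbers only v20 and v26 (neither
   being an argument or return-value register here) yields a mask with
   exactly the v20 and v26 bits set; the prologue ORs this into VRSAVE
   so the run-time environment knows which vector registers must be
   preserved across context switches.  */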
27042 /* For a very restricted set of circumstances, we can cut down the
27043 size of prologues/epilogues by calling our own save/restore-the-world
27044 routines. */
27046 static void
27047 compute_save_world_info (rs6000_stack_t *info)
27049 info->world_save_p = 1;
27050 info->world_save_p
27051 = (WORLD_SAVE_P (info)
27052 && DEFAULT_ABI == ABI_DARWIN
27053 && !cfun->has_nonlocal_label
27054 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
27055 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
27056 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
27057 && info->cr_save_p);
27059 /* This will not work in conjunction with sibcalls. Make sure there
27060 are none. (This check is expensive, but seldom executed.) */
27061 if (WORLD_SAVE_P (info))
27063 rtx_insn *insn;
27064 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
27065 if (CALL_P (insn) && SIBLING_CALL_P (insn))
27067 info->world_save_p = 0;
27068 break;
27072 if (WORLD_SAVE_P (info))
27074 /* Even if we're not touching VRsave, make sure there's room on the
27075 stack for it, if it looks like we're calling SAVE_WORLD, which
27076 will attempt to save it. */
27077 info->vrsave_size = 4;
27079 /* If we are going to save the world, we need to save the link register too. */
27080 info->lr_save_p = 1;
27082 /* "Save" the VRsave register too if we're saving the world. */
27083 if (info->vrsave_mask == 0)
27084 info->vrsave_mask = compute_vrsave_mask ();
27086 /* Because the Darwin register save/restore routines only handle
27087 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
27088 check. */
27089 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
27090 && (info->first_altivec_reg_save
27091 >= FIRST_SAVED_ALTIVEC_REGNO));
27094 return;
27098 static void
27099 is_altivec_return_reg (rtx reg, void *xyes)
27101 bool *yes = (bool *) xyes;
27102 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
27103 *yes = true;
27107 /* Return whether REG is a global user reg or has been specified by
27108 -ffixed-REG. We should not restore these, and so cannot use
27109 lmw or out-of-line restore functions if there are any. We also
27110 can't save them (well, emit frame notes for them), because frame
27111 unwinding during exception handling will restore saved registers. */
27113 static bool
27114 fixed_reg_p (int reg)
27116 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
27117 backend sets it, overriding anything the user might have given. */
27118 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
27119 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
27120 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
27121 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
27122 return false;
27124 return fixed_regs[reg];
27127 /* Determine the strategy for saving/restoring registers. */
27129 enum {
27130 SAVE_MULTIPLE = 0x1,
27131 SAVE_INLINE_GPRS = 0x2,
27132 SAVE_INLINE_FPRS = 0x4,
27133 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
27134 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
27135 SAVE_INLINE_VRS = 0x20,
27136 REST_MULTIPLE = 0x100,
27137 REST_INLINE_GPRS = 0x200,
27138 REST_INLINE_FPRS = 0x400,
27139 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
27140 REST_INLINE_VRS = 0x1000
27141 };
27143 static int
27144 rs6000_savres_strategy (rs6000_stack_t *info,
27145 bool using_static_chain_p)
27147 int strategy = 0;
27149 /* Select between in-line and out-of-line save and restore of regs.
27150 First, all the obvious cases where we don't use out-of-line. */
27151 if (crtl->calls_eh_return
27152 || cfun->machine->ra_need_lr)
27153 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
27154 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
27155 | SAVE_INLINE_VRS | REST_INLINE_VRS);
27157 if (info->first_gp_reg_save == 32)
27158 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27160 if (info->first_fp_reg_save == 64
27161 /* The out-of-line FP routines use double-precision stores;
27162 we can't use those routines if we don't have such stores. */
27163 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
27164 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27166 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
27167 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27169 /* Define cutoff for using out-of-line functions to save registers. */
27170 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
27172 if (!optimize_size)
27174 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27175 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27176 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27178 else
27180 /* Prefer out-of-line restore if it will exit. */
27181 if (info->first_fp_reg_save > 61)
27182 strategy |= SAVE_INLINE_FPRS;
27183 if (info->first_gp_reg_save > 29)
27185 if (info->first_fp_reg_save == 64)
27186 strategy |= SAVE_INLINE_GPRS;
27187 else
27188 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27190 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
27191 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27194 else if (DEFAULT_ABI == ABI_DARWIN)
27196 if (info->first_fp_reg_save > 60)
27197 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27198 if (info->first_gp_reg_save > 29)
27199 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27200 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27202 else
27204 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27205 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
27206 || info->first_fp_reg_save > 61)
27207 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
27208 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27209 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
27212 /* Don't bother to try to save things out-of-line if r11 is occupied
27213 by the static chain. It would require too much fiddling and the
27214 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
27215 pointer on Darwin, and AIX uses r1 or r12. */
27216 if (using_static_chain_p
27217 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
27218 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
27219 | SAVE_INLINE_GPRS
27220 | SAVE_INLINE_VRS);
27222 /* Saving CR interferes with the exit routines used on the SPE, so
27223 just punt here. */
27224 if (TARGET_SPE_ABI
27225 && info->spe_64bit_regs_used
27226 && info->cr_save_p)
27227 strategy |= REST_INLINE_GPRS;
27229 /* We can only use the out-of-line routines to restore fprs if we've
27230 saved all the registers from first_fp_reg_save in the prologue.
27231 Otherwise, we risk loading garbage. Of course, if we have saved
27232 out-of-line then we know we haven't skipped any fprs. */
27233 if ((strategy & SAVE_INLINE_FPRS)
27234 && !(strategy & REST_INLINE_FPRS))
27236 int i;
27238 for (i = info->first_fp_reg_save; i < 64; i++)
27239 if (fixed_regs[i] || !save_reg_p (i))
27241 strategy |= REST_INLINE_FPRS;
27242 break;
27246 /* Similarly, for altivec regs. */
27247 if ((strategy & SAVE_INLINE_VRS)
27248 && !(strategy & REST_INLINE_VRS))
27250 int i;
27252 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
27253 if (fixed_regs[i] || !save_reg_p (i))
27255 strategy |= REST_INLINE_VRS;
27256 break;
27260 /* info->lr_save_p isn't yet set if the only reason lr needs to be
27261 saved is an out-of-line save or restore. Set up the value for
27262 the next test (excluding out-of-line gprs). */
27263 bool lr_save_p = (info->lr_save_p
27264 || !(strategy & SAVE_INLINE_FPRS)
27265 || !(strategy & SAVE_INLINE_VRS)
27266 || !(strategy & REST_INLINE_FPRS)
27267 || !(strategy & REST_INLINE_VRS));
27269 if (TARGET_MULTIPLE
27270 && !TARGET_POWERPC64
27271 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
27272 && info->first_gp_reg_save < 31
27273 && !(flag_shrink_wrap
27274 && flag_shrink_wrap_separate
27275 && optimize_function_for_speed_p (cfun)))
27277 /* Prefer store multiple for saves over out-of-line routines,
27278 since the store-multiple instruction will always be smaller. */
27279 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
27281 /* The situation is more complicated with load multiple. We'd
27282 prefer to use the out-of-line routines for restores, since the
27283 "exit" out-of-line routines can handle the restore of LR and the
27284 frame teardown. However, it doesn't make sense to use the
27285 out-of-line routine if that is the only reason we'd need to save
27286 LR, and we can't use the "exit" out-of-line gpr restore if we
27287 have saved some fprs; in those cases it is advantageous to use
27288 load multiple when available. */
27289 if (info->first_fp_reg_save != 64 || !lr_save_p)
27290 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
27293 /* Using the "exit" out-of-line routine does not improve code size
27294 if using it would require lr to be saved and if only saving one
27295 or two gprs. */
27296 else if (!lr_save_p && info->first_gp_reg_save > 29)
27297 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
27299 /* We can only use load multiple or the out-of-line routines to
27300 restore gprs if we've saved all the registers from
27301 first_gp_reg_save. Otherwise, we risk loading garbage.
27302 Of course, if we have saved out-of-line or used stmw then we know
27303 we haven't skipped any gprs. */
27304 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
27305 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
27307 int i;
27309 for (i = info->first_gp_reg_save; i < 32; i++)
27310 if (fixed_reg_p (i) || !save_reg_p (i))
27312 strategy |= REST_INLINE_GPRS;
27313 strategy &= ~REST_MULTIPLE;
27314 break;
27318 if (TARGET_ELF && TARGET_64BIT)
27320 if (!(strategy & SAVE_INLINE_FPRS))
27321 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
27322 else if (!(strategy & SAVE_INLINE_GPRS)
27323 && info->first_fp_reg_save == 64)
27324 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
27326 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
27327 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
27329 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
27330 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
27332 return strategy;
27335 /* Calculate the stack information for the current function. This is
27336 complicated by having two separate calling sequences, the AIX calling
27337 sequence and the V.4 calling sequence.
27339 AIX (and Darwin/Mac OS X) stack frames look like:
27340 32-bit 64-bit
27341 SP----> +---------------------------------------+
27342 | back chain to caller | 0 0
27343 +---------------------------------------+
27344 | saved CR | 4 8 (8-11)
27345 +---------------------------------------+
27346 | saved LR | 8 16
27347 +---------------------------------------+
27348 | reserved for compilers | 12 24
27349 +---------------------------------------+
27350 | reserved for binders | 16 32
27351 +---------------------------------------+
27352 | saved TOC pointer | 20 40
27353 +---------------------------------------+
27354 | Parameter save area (+padding*) (P) | 24 48
27355 +---------------------------------------+
27356 | Alloca space (A) | 24+P etc.
27357 +---------------------------------------+
27358 | Local variable space (L) | 24+P+A
27359 +---------------------------------------+
27360 | Float/int conversion temporary (X) | 24+P+A+L
27361 +---------------------------------------+
27362 | Save area for AltiVec registers (W) | 24+P+A+L+X
27363 +---------------------------------------+
27364 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
27365 +---------------------------------------+
27366 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
27367 +---------------------------------------+
27368 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
27369 +---------------------------------------+
27370 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
27371 +---------------------------------------+
27372 old SP->| back chain to caller's caller |
27373 +---------------------------------------+
27375 * If the alloca area is present, the parameter save area is
27376 padded so that the alloca area starts 16-byte aligned.
27378 The required alignment for AIX configurations is two words (i.e., 8
27379 or 16 bytes).
27381 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
27383 SP----> +---------------------------------------+
27384 | Back chain to caller | 0
27385 +---------------------------------------+
27386 | Save area for CR | 8
27387 +---------------------------------------+
27388 | Saved LR | 16
27389 +---------------------------------------+
27390 | Saved TOC pointer | 24
27391 +---------------------------------------+
27392 | Parameter save area (+padding*) (P) | 32
27393 +---------------------------------------+
27394 | Alloca space (A) | 32+P
27395 +---------------------------------------+
27396 | Local variable space (L) | 32+P+A
27397 +---------------------------------------+
27398 | Save area for AltiVec registers (W) | 32+P+A+L
27399 +---------------------------------------+
27400 | AltiVec alignment padding (Y) | 32+P+A+L+W
27401 +---------------------------------------+
27402 | Save area for GP registers (G) | 32+P+A+L+W+Y
27403 +---------------------------------------+
27404 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
27405 +---------------------------------------+
27406 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
27407 +---------------------------------------+
27409 * If the alloca area is present, the parameter save area is
27410 padded so that the alloca area starts 16-byte aligned.
27412 V.4 stack frames look like:
27414 SP----> +---------------------------------------+
27415 | back chain to caller | 0
27416 +---------------------------------------+
27417 | caller's saved LR | 4
27418 +---------------------------------------+
27419 | Parameter save area (+padding*) (P) | 8
27420 +---------------------------------------+
27421 | Alloca space (A) | 8+P
27422 +---------------------------------------+
27423 | Varargs save area (V) | 8+P+A
27424 +---------------------------------------+
27425 | Local variable space (L) | 8+P+A+V
27426 +---------------------------------------+
27427 | Float/int conversion temporary (X) | 8+P+A+V+L
27428 +---------------------------------------+
27429 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
27430 +---------------------------------------+
27431 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
27432 +---------------------------------------+
27433 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
27434 +---------------------------------------+
27435 | SPE: area for 64-bit GP registers |
27436 +---------------------------------------+
27437 | SPE alignment padding |
27438 +---------------------------------------+
27439 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
27440 +---------------------------------------+
27441 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
27442 +---------------------------------------+
27443 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
27444 +---------------------------------------+
27445 old SP->| back chain to caller's caller |
27446 +---------------------------------------+
27448 * If the alloca area is present and the required alignment is
27449 16 bytes, the parameter save area is padded so that the
27450 alloca area starts 16-byte aligned.
27452 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
27453 given. (But note below and in sysv4.h that we require only 8 and
27454 may round up the size of our stack frame anyway. The historical
27455 reason is early versions of powerpc-linux which didn't properly
27456 align the stack at program startup. A happy side-effect is that
27457 -mno-eabi libraries can be used with -meabi programs.)
27459 The EABI configuration defaults to the V.4 layout. However,
27460 the stack alignment requirements may differ. If -mno-eabi is not
27461 given, the required stack alignment is 8 bytes; if -mno-eabi is
27462 given, the required alignment is 16 bytes. (But see V.4 comment
27463 above.) */
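/* A worked example of the rounding used below (editorial sketch;
   RS6000_ALIGN is shown by its usual round-up-to-multiple definition,
   not quoted from the headers):

     RS6000_ALIGN (n, a) == ((n + a - 1) & ~(a - 1))

   so 52 bytes of locals round to RS6000_ALIGN (52, 8) == 56 under the
   8-byte V.4 boundary, and to 64 under a 16-byte boundary.  */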
27465 #ifndef ABI_STACK_BOUNDARY
27466 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
27467 #endif
27469 static rs6000_stack_t *
27470 rs6000_stack_info (void)
27472 /* We should never be called for thunks; we are not set up for that. */
27473 gcc_assert (!cfun->is_thunk);
27475 rs6000_stack_t *info = &stack_info;
27476 int reg_size = TARGET_32BIT ? 4 : 8;
27477 int ehrd_size;
27478 int ehcr_size;
27479 int save_align;
27480 int first_gp;
27481 HOST_WIDE_INT non_fixed_size;
27482 bool using_static_chain_p;
27484 if (reload_completed && info->reload_completed)
27485 return info;
27487 memset (info, 0, sizeof (*info));
27488 info->reload_completed = reload_completed;
27490 if (TARGET_SPE)
27492 /* Cache value so we don't rescan instruction chain over and over. */
27493 if (cfun->machine->spe_insn_chain_scanned_p == 0)
27494 cfun->machine->spe_insn_chain_scanned_p
27495 = spe_func_has_64bit_regs_p () + 1;
27496 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
27499 /* Select which calling sequence. */
27500 info->abi = DEFAULT_ABI;
27502 /* Calculate which registers need to be saved & save area size. */
27503 info->first_gp_reg_save = first_reg_to_save ();
27504 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
27505 even if it currently looks like we won't. Reload may need it to
27506 get at a constant; if so, it will have already created a constant
27507 pool entry for it. */
27508 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
27509 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
27510 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
27511 && crtl->uses_const_pool
27512 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
27513 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
27514 else
27515 first_gp = info->first_gp_reg_save;
27517 info->gp_size = reg_size * (32 - first_gp);
27519 /* For the SPE, we have an additional upper 32 bits on each GPR.
27520 Ideally we should save the entire 64 bits only when the upper
27521 half is used in SIMD instructions. Since we only record
27522 registers live (not the size they are used in), this proves
27523 difficult because we'd have to traverse the instruction chain at
27524 the right time, taking reload into account. This is a real pain,
27525 so we opt to save all the GPRs in 64 bits if even one register
27526 is used in 64 bits. Otherwise, all the registers in the frame
27527 are saved in 32 bits.
27529 So, when we save all GPRs (except the SP) in 64 bits, the
27530 traditional GP save area will be empty. */
27531 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27532 info->gp_size = 0;
27534 info->first_fp_reg_save = first_fp_reg_to_save ();
27535 info->fp_size = 8 * (64 - info->first_fp_reg_save);
27537 info->first_altivec_reg_save = first_altivec_reg_to_save ();
27538 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
27539 - info->first_altivec_reg_save);
27541 /* Does this function call anything? */
27542 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
27544 /* Determine if we need to save the condition code registers. */
27545 if (save_reg_p (CR2_REGNO)
27546 || save_reg_p (CR3_REGNO)
27547 || save_reg_p (CR4_REGNO))
27549 info->cr_save_p = 1;
27550 if (DEFAULT_ABI == ABI_V4)
27551 info->cr_size = reg_size;
27554 /* If the current function calls __builtin_eh_return, then we need
27555 to allocate stack space for registers that will hold data for
27556 the exception handler. */
27557 if (crtl->calls_eh_return)
27559 unsigned int i;
27560 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
27561 continue;
27563 /* SPE saves EH registers in 64-bits. */
27564 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
27565 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
27567 else
27568 ehrd_size = 0;
27570 /* In the ELFv2 ABI, we also need to allocate space for separate
27571 CR field save areas if the function calls __builtin_eh_return. */
27572 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27574 /* This hard-codes that we have three call-saved CR fields. */
27575 ehcr_size = 3 * reg_size;
27576 /* We do *not* use the regular CR save mechanism. */
27577 info->cr_save_p = 0;
27579 else
27580 ehcr_size = 0;
27582 /* Determine various sizes. */
27583 info->reg_size = reg_size;
27584 info->fixed_size = RS6000_SAVE_AREA;
27585 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
27586 if (cfun->calls_alloca)
27587 info->parm_size =
27588 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
27589 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
27590 else
27591 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
27592 TARGET_ALTIVEC ? 16 : 8);
27593 if (FRAME_GROWS_DOWNWARD)
27594 info->vars_size
27595 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
27596 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
27597 - (info->fixed_size + info->vars_size + info->parm_size);
27599 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27600 info->spe_gp_size = 8 * (32 - first_gp);
27602 if (TARGET_ALTIVEC_ABI)
27603 info->vrsave_mask = compute_vrsave_mask ();
27605 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
27606 info->vrsave_size = 4;
27608 compute_save_world_info (info);
27610 /* Calculate the offsets. */
27611 switch (DEFAULT_ABI)
27613 case ABI_NONE:
27614 default:
27615 gcc_unreachable ();
27617 case ABI_AIX:
27618 case ABI_ELFv2:
27619 case ABI_DARWIN:
27620 info->fp_save_offset = -info->fp_size;
27621 info->gp_save_offset = info->fp_save_offset - info->gp_size;
27623 if (TARGET_ALTIVEC_ABI)
27625 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
27627 /* Align stack so vector save area is on a quadword boundary.
27628 The padding goes above the vectors. */
27629 if (info->altivec_size != 0)
27630 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
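/* Worked example (editorial): offsets count down from the frame top,
   so with vrsave_save_offset == -212 the mask gives -212 & 0xF == 12
   bytes of padding, and -212 - 12 == -224 is quadword aligned, which
   is what the assert below checks.  */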
27632 info->altivec_save_offset = info->vrsave_save_offset
27633 - info->altivec_padding_size
27634 - info->altivec_size;
27635 gcc_assert (info->altivec_size == 0
27636 || info->altivec_save_offset % 16 == 0);
27638 /* Adjust for AltiVec case. */
27639 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
27641 else
27642 info->ehrd_offset = info->gp_save_offset - ehrd_size;
27644 info->ehcr_offset = info->ehrd_offset - ehcr_size;
27645 info->cr_save_offset = reg_size; /* first word when 64-bit. */
27646 info->lr_save_offset = 2*reg_size;
27647 break;
27649 case ABI_V4:
27650 info->fp_save_offset = -info->fp_size;
27651 info->gp_save_offset = info->fp_save_offset - info->gp_size;
27652 info->cr_save_offset = info->gp_save_offset - info->cr_size;
27654 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27656 /* Align stack so SPE GPR save area is aligned on a
27657 double-word boundary. */
27658 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
27659 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
27660 else
27661 info->spe_padding_size = 0;
27663 info->spe_gp_save_offset = info->cr_save_offset
27664 - info->spe_padding_size
27665 - info->spe_gp_size;
27667 /* Adjust for SPE case. */
27668 info->ehrd_offset = info->spe_gp_save_offset;
27670 else if (TARGET_ALTIVEC_ABI)
27672 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
27674 /* Align stack so vector save area is on a quadword boundary. */
27675 if (info->altivec_size != 0)
27676 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
27678 info->altivec_save_offset = info->vrsave_save_offset
27679 - info->altivec_padding_size
27680 - info->altivec_size;
27682 /* Adjust for AltiVec case. */
27683 info->ehrd_offset = info->altivec_save_offset;
27685 else
27686 info->ehrd_offset = info->cr_save_offset;
27688 info->ehrd_offset -= ehrd_size;
27689 info->lr_save_offset = reg_size;
27692 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
27693 info->save_size = RS6000_ALIGN (info->fp_size
27694 + info->gp_size
27695 + info->altivec_size
27696 + info->altivec_padding_size
27697 + info->spe_gp_size
27698 + info->spe_padding_size
27699 + ehrd_size
27700 + ehcr_size
27701 + info->cr_size
27702 + info->vrsave_size,
27703 save_align);
27705 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
27707 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
27708 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
27710 /* Determine if we need to save the link register. */
27711 if (info->calls_p
27712 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27713 && crtl->profile
27714 && !TARGET_PROFILE_KERNEL)
27715 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
27716 #ifdef TARGET_RELOCATABLE
27717 || (DEFAULT_ABI == ABI_V4
27718 && (TARGET_RELOCATABLE || flag_pic > 1)
27719 && !constant_pool_empty_p ())
27720 #endif
27721 || rs6000_ra_ever_killed ())
27722 info->lr_save_p = 1;
27724 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27725 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27726 && call_used_regs[STATIC_CHAIN_REGNUM]);
27727 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
27729 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
27730 || !(info->savres_strategy & SAVE_INLINE_FPRS)
27731 || !(info->savres_strategy & SAVE_INLINE_VRS)
27732 || !(info->savres_strategy & REST_INLINE_GPRS)
27733 || !(info->savres_strategy & REST_INLINE_FPRS)
27734 || !(info->savres_strategy & REST_INLINE_VRS))
27735 info->lr_save_p = 1;
27737 if (info->lr_save_p)
27738 df_set_regs_ever_live (LR_REGNO, true);
27740 /* Determine if we need to allocate any stack frame:
27742 For AIX we need to push the stack if a frame pointer is needed
27743 (because the stack might be dynamically adjusted), if we are
27744 debugging, if we make calls, or if the sum of fp_save, gp_save,
27745 and local variables are more than the space needed to save all
27746 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
27747 + 18*8 = 288 (GPR13 reserved).
27749 For V.4 we don't have the stack cushion that AIX uses, but assume
27750 that the debugger can handle stackless frames. */
27752 if (info->calls_p)
27753 info->push_p = 1;
27755 else if (DEFAULT_ABI == ABI_V4)
27756 info->push_p = non_fixed_size != 0;
27758 else if (frame_pointer_needed)
27759 info->push_p = 1;
27761 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
27762 info->push_p = 1;
27764 else
27765 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
27767 return info;
27770 /* Return true if the current function uses any GPRs in 64-bit SIMD
27771 mode. */
27773 static bool
27774 spe_func_has_64bit_regs_p (void)
27776 rtx_insn *insns, *insn;
27778 /* Functions that save and restore all the call-saved registers will
27779 need to save/restore the registers in 64-bits. */
27780 if (crtl->calls_eh_return
27781 || cfun->calls_setjmp
27782 || crtl->has_nonlocal_goto)
27783 return true;
27785 insns = get_insns ();
27787 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
27789 if (INSN_P (insn))
27791 rtx i;
27793 /* FIXME: This should be implemented with attributes...
27795 (set_attr "spe64" "true")....then,
27796 if (get_spe64(insn)) return true;
27798 It's the only reliable way to do the stuff below. */
27800 i = PATTERN (insn);
27801 if (GET_CODE (i) == SET)
27803 machine_mode mode = GET_MODE (SET_SRC (i));
27805 if (SPE_VECTOR_MODE (mode))
27806 return true;
27807 if (TARGET_E500_DOUBLE
27808 && (mode == DFmode || FLOAT128_2REG_P (mode)))
27809 return true;
27814 return false;
27817 static void
27818 debug_stack_info (rs6000_stack_t *info)
27820 const char *abi_string;
27822 if (! info)
27823 info = rs6000_stack_info ();
27825 fprintf (stderr, "\nStack information for function %s:\n",
27826 ((current_function_decl && DECL_NAME (current_function_decl))
27827 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
27828 : "<unknown>"));
27830 switch (info->abi)
27832 default: abi_string = "Unknown"; break;
27833 case ABI_NONE: abi_string = "NONE"; break;
27834 case ABI_AIX: abi_string = "AIX"; break;
27835 case ABI_ELFv2: abi_string = "ELFv2"; break;
27836 case ABI_DARWIN: abi_string = "Darwin"; break;
27837 case ABI_V4: abi_string = "V.4"; break;
27840 fprintf (stderr, "\tABI = %5s\n", abi_string);
27842 if (TARGET_ALTIVEC_ABI)
27843 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
27845 if (TARGET_SPE_ABI)
27846 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
27848 if (info->first_gp_reg_save != 32)
27849 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
27851 if (info->first_fp_reg_save != 64)
27852 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
27854 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
27855 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
27856 info->first_altivec_reg_save);
27858 if (info->lr_save_p)
27859 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
27861 if (info->cr_save_p)
27862 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
27864 if (info->vrsave_mask)
27865 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
27867 if (info->push_p)
27868 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
27870 if (info->calls_p)
27871 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
27873 if (info->gp_size)
27874 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
27876 if (info->fp_size)
27877 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
27879 if (info->altivec_size)
27880 fprintf (stderr, "\taltivec_save_offset = %5d\n",
27881 info->altivec_save_offset);
27883 if (info->spe_gp_size)
27884 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
27885 info->spe_gp_save_offset);
27887 if (info->vrsave_size)
27888 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
27889 info->vrsave_save_offset);
27891 if (info->lr_save_p)
27892 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
27894 if (info->cr_save_p)
27895 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
27897 if (info->varargs_save_offset)
27898 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
27900 if (info->total_size)
27901 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27902 info->total_size);
27904 if (info->vars_size)
27905 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27906 info->vars_size);
27908 if (info->parm_size)
27909 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
27911 if (info->fixed_size)
27912 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
27914 if (info->gp_size)
27915 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
27917 if (info->spe_gp_size)
27918 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
27920 if (info->fp_size)
27921 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
27923 if (info->altivec_size)
27924 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
27926 if (info->vrsave_size)
27927 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
27929 if (info->altivec_padding_size)
27930 fprintf (stderr, "\taltivec_padding_size= %5d\n",
27931 info->altivec_padding_size);
27933 if (info->spe_padding_size)
27934 fprintf (stderr, "\tspe_padding_size = %5d\n",
27935 info->spe_padding_size);
27937 if (info->cr_size)
27938 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
27940 if (info->save_size)
27941 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
27943 if (info->reg_size != 4)
27944 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
27946 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
27948 fprintf (stderr, "\n");
27951 rtx
27952 rs6000_return_addr (int count, rtx frame)
27954 /* Currently we don't optimize very well between prologue and body
27955 code, and for PIC code the generated code can be quite bad, so
27956 don't try to be too clever here. */
27957 if (count != 0
27958 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
27960 cfun->machine->ra_needs_full_frame = 1;
27962 return
27963 gen_rtx_MEM
27964 (Pmode,
27965 memory_address
27966 (Pmode,
27967 plus_constant (Pmode,
27968 copy_to_reg
27969 (gen_rtx_MEM (Pmode,
27970 memory_address (Pmode, frame))),
27971 RETURN_ADDRESS_OFFSET)));
27974 cfun->machine->ra_need_lr = 1;
27975 return get_hard_reg_initial_val (Pmode, LR_REGNO);
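/* Editorial note: this is the expander behind __builtin_return_address,
   e.g.

     void *ra = __builtin_return_address (0);

   takes the COUNT == 0, non-PIC path and reads LR as of function entry,
   while COUNT > 0 (or PIC on V.4/Darwin) walks the back chain through
   the MEM built above and forces a full frame.  */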
27978 /* Say whether a function is a candidate for sibcall handling or not. */
27980 static bool
27981 rs6000_function_ok_for_sibcall (tree decl, tree exp)
27983 tree fntype;
27985 if (decl)
27986 fntype = TREE_TYPE (decl);
27987 else
27988 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
27990 /* We can't do it if the called function has more vector parameters
27991 than the current function; there's nowhere to put the VRsave code. */
27992 if (TARGET_ALTIVEC_ABI
27993 && TARGET_ALTIVEC_VRSAVE
27994 && !(decl && decl == current_function_decl))
27996 function_args_iterator args_iter;
27997 tree type;
27998 int nvreg = 0;
28000 /* Functions with vector parameters are required to have a
28001 prototype, so the argument type info must be available
28002 here. */
28003 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
28004 if (TREE_CODE (type) == VECTOR_TYPE
28005 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
28006 nvreg++;
28008 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
28009 if (TREE_CODE (type) == VECTOR_TYPE
28010 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
28011 nvreg--;
28013 if (nvreg > 0)
28014 return false;
28017 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
28018 functions, because the callee may have a different TOC pointer from
28019 the caller and there's no way to ensure we restore the TOC when
28020 we return. With the secure-plt SYSV ABI we can't make non-local
28021 calls when compiling -fpic/-fPIC because the plt call stubs use r30. */
28022 if (DEFAULT_ABI == ABI_DARWIN
28023 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28024 && decl
28025 && !DECL_EXTERNAL (decl)
28026 && !DECL_WEAK (decl)
28027 && (*targetm.binds_local_p) (decl))
28028 || (DEFAULT_ABI == ABI_V4
28029 && (!TARGET_SECURE_PLT
28030 || !flag_pic
28031 || (decl
28032 && (*targetm.binds_local_p) (decl)))))
28034 tree attr_list = TYPE_ATTRIBUTES (fntype);
28036 if (!lookup_attribute ("longcall", attr_list)
28037 || lookup_attribute ("shortcall", attr_list))
28038 return true;
28041 return false;
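/* Editorial illustration of the test above for the AIX/ELFv2 case:

     static int helper (int);
     int f (int x) { return helper (x); }

   may be compiled as a sibcall ("b helper") because "helper" binds
   locally and shares the TOC, whereas an external callee, a weak one,
   or one declared __attribute__ ((longcall)) is rejected.  */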
28044 static int
28045 rs6000_ra_ever_killed (void)
28047 rtx_insn *top;
28048 rtx reg;
28049 rtx_insn *insn;
28051 if (cfun->is_thunk)
28052 return 0;
28054 if (cfun->machine->lr_save_state)
28055 return cfun->machine->lr_save_state - 1;
28057 /* regs_ever_live has LR marked as used if any sibcalls are present,
28058 but this should not force saving and restoring in the
28059 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
28060 clobbers LR, so that is inappropriate. */
28062 /* Also, the prologue can generate a store into LR that
28063 doesn't really count, like this:
28065 move LR->R0
28066 bcl to set PIC register
28067 move LR->R31
28068 move R0->LR
28070 When we're called from the epilogue, we need to avoid counting
28071 this as a store. */
28073 push_topmost_sequence ();
28074 top = get_insns ();
28075 pop_topmost_sequence ();
28076 reg = gen_rtx_REG (Pmode, LR_REGNO);
28078 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
28080 if (INSN_P (insn))
28082 if (CALL_P (insn))
28084 if (!SIBLING_CALL_P (insn))
28085 return 1;
28087 else if (find_regno_note (insn, REG_INC, LR_REGNO))
28088 return 1;
28089 else if (set_of (reg, insn) != NULL_RTX
28090 && !prologue_epilogue_contains (insn))
28091 return 1;
28094 return 0;
28097 /* Emit instructions needed to load the TOC register.
28098 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
28099 a constant pool; or for SVR4 -fpic. */
28101 void
28102 rs6000_emit_load_toc_table (int fromprolog)
28104 rtx dest;
28105 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28107 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
28109 char buf[30];
28110 rtx lab, tmp1, tmp2, got;
28112 lab = gen_label_rtx ();
28113 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
28114 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28115 if (flag_pic == 2)
28117 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28118 need_toc_init = 1;
28120 else
28121 got = rs6000_got_sym ();
28122 tmp1 = tmp2 = dest;
28123 if (!fromprolog)
28125 tmp1 = gen_reg_rtx (Pmode);
28126 tmp2 = gen_reg_rtx (Pmode);
28128 emit_insn (gen_load_toc_v4_PIC_1 (lab));
28129 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
28130 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
28131 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
28133 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
28135 emit_insn (gen_load_toc_v4_pic_si ());
28136 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28138 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
28140 char buf[30];
28141 rtx temp0 = (fromprolog
28142 ? gen_rtx_REG (Pmode, 0)
28143 : gen_reg_rtx (Pmode));
28145 if (fromprolog)
28147 rtx symF, symL;
28149 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28150 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28152 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
28153 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
28155 emit_insn (gen_load_toc_v4_PIC_1 (symF));
28156 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28157 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
28159 else
28161 rtx tocsym, lab;
28163 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28164 need_toc_init = 1;
28165 lab = gen_label_rtx ();
28166 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
28167 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
28168 if (TARGET_LINK_STACK)
28169 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
28170 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
28172 emit_insn (gen_addsi3 (dest, temp0, dest));
28174 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
28176 /* This is for AIX code running in non-PIC ELF32. */
28177 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
28179 need_toc_init = 1;
28180 emit_insn (gen_elf_high (dest, realsym));
28181 emit_insn (gen_elf_low (dest, dest, realsym));
28183 else
28185 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28187 if (TARGET_32BIT)
28188 emit_insn (gen_load_toc_aix_si (dest));
28189 else
28190 emit_insn (gen_load_toc_aix_di (dest));
28194 /* Emit instructions to restore the link register after determining where
28195 its value has been stored. */
28197 void
28198 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
28200 rs6000_stack_t *info = rs6000_stack_info ();
28201 rtx operands[2];
28203 operands[0] = source;
28204 operands[1] = scratch;
28206 if (info->lr_save_p)
28208 rtx frame_rtx = stack_pointer_rtx;
28209 HOST_WIDE_INT sp_offset = 0;
28210 rtx tmp;
28212 if (frame_pointer_needed
28213 || cfun->calls_alloca
28214 || info->total_size > 32767)
28216 tmp = gen_frame_mem (Pmode, frame_rtx);
28217 emit_move_insn (operands[1], tmp);
28218 frame_rtx = operands[1];
28220 else if (info->push_p)
28221 sp_offset = info->total_size;
28223 tmp = plus_constant (Pmode, frame_rtx,
28224 info->lr_save_offset + sp_offset);
28225 tmp = gen_frame_mem (Pmode, tmp);
28226 emit_move_insn (tmp, operands[0]);
28228 else
28229 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
28231 /* Freeze lr_save_p. We've just emitted rtl that depends on the
28232 state of lr_save_p so any change from here on would be a bug. In
28233 particular, stop rs6000_ra_ever_killed from considering the SET
28234 of lr we may have added just above. */
28235 cfun->machine->lr_save_state = info->lr_save_p + 1;
28238 static GTY(()) alias_set_type set = -1;
28240 alias_set_type
28241 get_TOC_alias_set (void)
28243 if (set == -1)
28244 set = new_alias_set ();
28245 return set;
28248 /* This returns nonzero if the current function uses the TOC. This is
28249 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
28250 is generated by the ABI_V4 load_toc_* patterns. */
28251 #if TARGET_ELF
28252 static int
28253 uses_TOC (void)
28255 rtx_insn *insn;
28257 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
28258 if (INSN_P (insn))
28260 rtx pat = PATTERN (insn);
28261 int i;
28263 if (GET_CODE (pat) == PARALLEL)
28264 for (i = 0; i < XVECLEN (pat, 0); i++)
28266 rtx sub = XVECEXP (pat, 0, i);
28267 if (GET_CODE (sub) == USE)
28269 sub = XEXP (sub, 0);
28270 if (GET_CODE (sub) == UNSPEC
28271 && XINT (sub, 1) == UNSPEC_TOC)
28272 return 1;
28276 return 0;
28278 #endif
28280 rtx
28281 create_TOC_reference (rtx symbol, rtx largetoc_reg)
28283 rtx tocrel, tocreg, hi;
28285 if (TARGET_DEBUG_ADDR)
28287 if (GET_CODE (symbol) == SYMBOL_REF)
28288 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
28289 XSTR (symbol, 0));
28290 else
28292 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
28293 GET_RTX_NAME (GET_CODE (symbol)));
28294 debug_rtx (symbol);
28298 if (!can_create_pseudo_p ())
28299 df_set_regs_ever_live (TOC_REGISTER, true);
28301 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
28302 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
28303 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
28304 return tocrel;
28306 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
28307 if (largetoc_reg != NULL)
28309 emit_move_insn (largetoc_reg, hi);
28310 hi = largetoc_reg;
28312 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
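/* Editorial sketch of what the two shapes assemble to on 64-bit ELF,
   where "sym" stands for the SYMBOL_REF's TOC entry and r9 for a
   scratch register:

	ld    3,sym@toc(2)		small code model (plain tocrel)

	addis 9,2,sym@toc@ha		medium/large code models
	ld    3,sym@toc@l(9)		(the HIGH / LO_SUM pair above)  */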
28315 /* Issue assembly directives that create a reference to the given DWARF
28316 FRAME_TABLE_LABEL from the current function section. */
28317 void
28318 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
28320 fprintf (asm_out_file, "\t.ref %s\n",
28321 (* targetm.strip_name_encoding) (frame_table_label));
28324 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
28325 and the change to the stack pointer. */
28327 static void
28328 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
28330 rtvec p;
28331 int i;
28332 rtx regs[3];
28334 i = 0;
28335 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28336 if (hard_frame_needed)
28337 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
28338 if (!(REGNO (fp) == STACK_POINTER_REGNUM
28339 || (hard_frame_needed
28340 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
28341 regs[i++] = fp;
28343 p = rtvec_alloc (i);
28344 while (--i >= 0)
28346 rtx mem = gen_frame_mem (BLKmode, regs[i]);
28347 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
28350 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
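/* Editorial sketch: the emitted insn is a stack_tie wrapped around

     (parallel [(set (mem/c:BLK (reg 1)) (const_int 0)) ...])

   which assembles to nothing; it exists only to give the scheduler a
   dependency between the register saves and the stack pointer update.  */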
28353 /* Emit the correct code for allocating stack space, as insns.
28354 If COPY_REG, leave a copy of the old stack pointer (plus COPY_OFF) in it.
28355 The generated code may use hard register 0 as a temporary. */
28357 static rtx_insn *
28358 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
28360 rtx_insn *insn;
28361 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28362 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
28363 rtx todec = gen_int_mode (-size, Pmode);
28364 rtx par, set, mem;
28366 if (INTVAL (todec) != -size)
28368 warning (0, "stack frame too large");
28369 emit_insn (gen_trap ());
28370 return 0;
28373 if (crtl->limit_stack)
28375 if (REG_P (stack_limit_rtx)
28376 && REGNO (stack_limit_rtx) > 1
28377 && REGNO (stack_limit_rtx) <= 31)
28379 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
28380 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
28381 const0_rtx));
28383 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
28384 && TARGET_32BIT
28385 && DEFAULT_ABI == ABI_V4
28386 && !flag_pic)
28388 rtx toload = gen_rtx_CONST (VOIDmode,
28389 gen_rtx_PLUS (Pmode,
28390 stack_limit_rtx,
28391 GEN_INT (size)));
28393 emit_insn (gen_elf_high (tmp_reg, toload));
28394 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
28395 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
28396 const0_rtx));
28398 else
28399 warning (0, "stack limit expression is not supported");
28402 if (copy_reg)
28404 if (copy_off != 0)
28405 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
28406 else
28407 emit_move_insn (copy_reg, stack_reg);
28410 if (size > 32767)
28412 /* Need a note here so that try_split doesn't get confused. */
28413 if (get_last_insn () == NULL_RTX)
28414 emit_note (NOTE_INSN_DELETED);
28415 insn = emit_move_insn (tmp_reg, todec);
28416 try_split (PATTERN (insn), insn, 0);
28417 todec = tmp_reg;
28420 insn = emit_insn (TARGET_32BIT
28421 ? gen_movsi_update_stack (stack_reg, stack_reg,
28422 todec, stack_reg)
28423 : gen_movdi_di_update_stack (stack_reg, stack_reg,
28424 todec, stack_reg));
28425 /* Since we didn't use gen_frame_mem to generate the MEM, grab
28426 it now and set the alias set/attributes. The above gen_*_update
28427 calls will generate a PARALLEL with the MEM set being the first
28428 operation. */
28429 par = PATTERN (insn);
28430 gcc_assert (GET_CODE (par) == PARALLEL);
28431 set = XVECEXP (par, 0, 0);
28432 gcc_assert (GET_CODE (set) == SET);
28433 mem = SET_DEST (set);
28434 gcc_assert (MEM_P (mem));
28435 MEM_NOTRAP_P (mem) = 1;
28436 set_mem_alias_set (mem, get_frame_alias_set ());
28438 RTX_FRAME_RELATED_P (insn) = 1;
28439 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
28440 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
28441 GEN_INT (-size))));
28442 return insn;
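/* Editorial sketch of the shapes this emits.  A small frame is a
   single store-with-update,

	stwu 1,-64(1)			(stdu on 64-bit)

   while a frame over 32767 bytes first loads -size into r0, roughly

	lis 0,-size@ha
	addi 0,0,-size@l
	stwux 1,1,0			(stdux on 64-bit)

   with the REG_FRAME_RELATED_EXPR note recording the sp decrement
   either way.  */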
28445 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
28447 #if PROBE_INTERVAL > 32768
28448 #error Cannot use indexed addressing mode for stack probing
28449 #endif
28451 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
28452 inclusive. These are offsets from the current stack pointer. */
28454 static void
28455 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
28457 /* See if we have a constant small number of probes to generate. If so,
28458 that's the easy case. */
28459 if (first + size <= 32768)
28461 HOST_WIDE_INT i;
28463 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
28464 it exceeds SIZE. If only one probe is needed, this will not
28465 generate any code. Then probe at FIRST + SIZE. */
28466 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
28467 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
28468 -(first + i)));
28470 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
28471 -(first + size)));
28474 /* Otherwise, do the same as above, but in a loop. Note that we must be
28475 extra careful with variables wrapping around because we might be at
28476 the very top (or the very bottom) of the address space and we have
28477 to be able to handle this case properly; in particular, we use an
28478 equality test for the loop condition. */
28479 else
28481 HOST_WIDE_INT rounded_size;
28482 rtx r12 = gen_rtx_REG (Pmode, 12);
28483 rtx r0 = gen_rtx_REG (Pmode, 0);
28485 /* Sanity check for the addressing mode we're going to use. */
28486 gcc_assert (first <= 32768);
28488 /* Step 1: round SIZE to the previous multiple of the interval. */
28490 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
28493 /* Step 2: compute initial and final value of the loop counter. */
28495 /* TEST_ADDR = SP + FIRST. */
28496 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
28497 -first)));
28499 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
28500 if (rounded_size > 32768)
28502 emit_move_insn (r0, GEN_INT (-rounded_size));
28503 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
28505 else
28506 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
28507 -rounded_size)));
28510 /* Step 3: the loop
28512 do
28513 {
28514 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
28515 probe at TEST_ADDR
28516 }
28517 while (TEST_ADDR != LAST_ADDR)
28519 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
28520 until it is equal to ROUNDED_SIZE. */
28522 if (TARGET_64BIT)
28523 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
28524 else
28525 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
28528 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
28529 that SIZE is equal to ROUNDED_SIZE. */
28531 if (size != rounded_size)
28532 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
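/* Worked example (editorial), with the default 4096-byte
   PROBE_INTERVAL: FIRST = 0, SIZE = 10000 takes the constant branch
   above and probes at sp-4096, sp-8192, then sp-10000; SIZE = 200000
   instead sets up r12/r0 and loops over ROUND_DOWN (200000, 4096)
   == 196608 bytes, with one trailing probe for the remainder.  */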
28536 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
28537 absolute addresses. */
28539 const char *
28540 output_probe_stack_range (rtx reg1, rtx reg2)
28542 static int labelno = 0;
28543 char loop_lab[32];
28544 rtx xops[2];
28546 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
28548 /* Loop. */
28549 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
28551 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
28552 xops[0] = reg1;
28553 xops[1] = GEN_INT (-PROBE_INTERVAL);
28554 output_asm_insn ("addi %0,%0,%1", xops);
28556 /* Probe at TEST_ADDR. */
28557 xops[1] = gen_rtx_REG (Pmode, 0);
28558 output_asm_insn ("stw %1,0(%0)", xops);
28560 /* Test if TEST_ADDR == LAST_ADDR. */
28561 xops[1] = reg2;
28562 if (TARGET_64BIT)
28563 output_asm_insn ("cmpd 0,%0,%1", xops);
28564 else
28565 output_asm_insn ("cmpw 0,%0,%1", xops);
28567 /* Branch. */
28568 fputs ("\tbne 0,", asm_out_file);
28569 assemble_name_raw (asm_out_file, loop_lab);
28570 fputc ('\n', asm_out_file);
28572 return "";
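/* Editorial sketch of the 32-bit loop as it reaches the assembler,
   assuming the default 4096-byte interval and the r12/r0 pair set up
   by rs6000_emit_probe_stack_range:

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0  */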
28575 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
28576 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
28577 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
28578 deduce these equivalences by itself so it wasn't necessary to hold
28579 its hand so much. Don't be tempted to always supply d2_f_d_e with
28580 the actual cfa register, i.e. r31 when we are using a hard frame
28581 pointer. That fails when saving regs off r1, and sched moves the
28582 r31 setup past the reg saves. */
28584 static rtx_insn *
28585 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
28586 rtx reg2, rtx repl2)
28588 rtx repl;
28590 if (REGNO (reg) == STACK_POINTER_REGNUM)
28592 gcc_checking_assert (val == 0);
28593 repl = NULL_RTX;
28595 else
28596 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
28597 GEN_INT (val));
28599 rtx pat = PATTERN (insn);
28600 if (!repl && !reg2)
28602 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
28603 if (GET_CODE (pat) == PARALLEL)
28604 for (int i = 0; i < XVECLEN (pat, 0); i++)
28605 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
28607 rtx set = XVECEXP (pat, 0, i);
28609 /* If this PARALLEL has been emitted for out-of-line
28610 register save functions, or store multiple, then omit
28611 eh_frame info for any user-defined global regs. If
28612 eh_frame info is supplied, frame unwinding will
28613 restore a user reg. */
28614 if (!REG_P (SET_SRC (set))
28615 || !fixed_reg_p (REGNO (SET_SRC (set))))
28616 RTX_FRAME_RELATED_P (set) = 1;
28618 RTX_FRAME_RELATED_P (insn) = 1;
28619 return insn;
28622 /* We expect that 'pat' is either a SET or a PARALLEL containing
28623 SETs (and possibly other stuff). In a PARALLEL, all the SETs
28624 are important so they all have to be marked RTX_FRAME_RELATED_P.
28625 Call simplify_replace_rtx on the SETs rather than the whole insn
28626 so as to leave the other stuff alone (for example USE of r12). */
28628 set_used_flags (pat);
28629 if (GET_CODE (pat) == SET)
28631 if (repl)
28632 pat = simplify_replace_rtx (pat, reg, repl);
28633 if (reg2)
28634 pat = simplify_replace_rtx (pat, reg2, repl2);
28636 else if (GET_CODE (pat) == PARALLEL)
28638 pat = shallow_copy_rtx (pat);
28639 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
28641 for (int i = 0; i < XVECLEN (pat, 0); i++)
28642 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
28644 rtx set = XVECEXP (pat, 0, i);
28646 if (repl)
28647 set = simplify_replace_rtx (set, reg, repl);
28648 if (reg2)
28649 set = simplify_replace_rtx (set, reg2, repl2);
28650 XVECEXP (pat, 0, i) = set;
28652 /* Omit eh_frame info for any user-defined global regs. */
28653 if (!REG_P (SET_SRC (set))
28654 || !fixed_reg_p (REGNO (SET_SRC (set))))
28655 RTX_FRAME_RELATED_P (set) = 1;
28658 else
28659 gcc_unreachable ();
28661 RTX_FRAME_RELATED_P (insn) = 1;
28662 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
28664 return insn;
28667 /* Returns an insn that has a vrsave set operation with the
28668 appropriate CLOBBERs. */
28670 static rtx
28671 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
28673 int nclobs, i;
28674 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
28675 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
28677 clobs[0]
28678 = gen_rtx_SET (vrsave,
28679 gen_rtx_UNSPEC_VOLATILE (SImode,
28680 gen_rtvec (2, reg, vrsave),
28681 UNSPECV_SET_VRSAVE));
28683 nclobs = 1;
28685 /* We need to clobber the registers in the mask so the scheduler
28686 does not move sets to VRSAVE before sets of AltiVec registers.
28688 However, if the function receives nonlocal gotos, reload will set
28689 all call saved registers live. We will end up with:
28691 (set (reg 999) (mem))
28692 (parallel [ (set (reg vrsave) (unspec blah))
28693 (clobber (reg 999))])
28695 The clobber will cause the store into reg 999 to be dead, and
28696 flow will attempt to delete an epilogue insn. In this case, we
28697 need an unspec use/set of the register. */
28699 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
28700 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28702 if (!epiloguep || call_used_regs [i])
28703 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
28704 gen_rtx_REG (V4SImode, i));
28705 else
28707 rtx reg = gen_rtx_REG (V4SImode, i);
28709 clobs[nclobs++]
28710 = gen_rtx_SET (reg,
28711 gen_rtx_UNSPEC (V4SImode,
28712 gen_rtvec (1, reg), 27));
28716 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
28718 for (i = 0; i < nclobs; ++i)
28719 XVECEXP (insn, 0, i) = clobs[i];
28721 return insn;
28724 static rtx
28725 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
28727 rtx addr, mem;
28729 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
28730 mem = gen_frame_mem (GET_MODE (reg), addr);
28731 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
28734 static rtx
28735 gen_frame_load (rtx reg, rtx frame_reg, int offset)
28737 return gen_frame_set (reg, frame_reg, offset, false);
28740 static rtx
28741 gen_frame_store (rtx reg, rtx frame_reg, int offset)
28743 return gen_frame_set (reg, frame_reg, offset, true);
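/* Editorial example: on a 64-bit target,

     gen_frame_store (gen_rtx_REG (DImode, 31), stack_pointer_rtx, -8)

   produces the SET

     (set (mem:DI (plus:DI (reg:DI 1) (const_int -8))) (reg:DI 31))

   with the MEM already carrying the frame alias set.  */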
28746 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
28747 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
28749 static rtx_insn *
28750 emit_frame_save (rtx frame_reg, machine_mode mode,
28751 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
28753 rtx reg;
28755 /* Some cases that need register indexed addressing. */
28756 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
28757 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
28758 || (TARGET_E500_DOUBLE && mode == DFmode)
28759 || (TARGET_SPE_ABI
28760 && SPE_VECTOR_MODE (mode)
28761 && !SPE_CONST_OFFSET_OK (offset))));
28763 reg = gen_rtx_REG (mode, regno);
28764 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
28765 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
28766 NULL_RTX, NULL_RTX);
28769 /* Emit an offset memory reference suitable for a frame store, while
28770 converting to a valid addressing mode. */
28772 static rtx
28773 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
28775 rtx int_rtx, offset_rtx;
28777 int_rtx = GEN_INT (offset);
28779 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
28780 || (TARGET_E500_DOUBLE && mode == DFmode))
28782 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
28783 emit_move_insn (offset_rtx, int_rtx);
28785 else
28786 offset_rtx = int_rtx;
28788 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
28791 #ifndef TARGET_FIX_AND_CONTINUE
28792 #define TARGET_FIX_AND_CONTINUE 0
28793 #endif
28795 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
28796 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
28797 #define LAST_SAVRES_REGISTER 31
28798 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
28800 enum {
28801 SAVRES_LR = 0x1,
28802 SAVRES_SAVE = 0x2,
28803 SAVRES_REG = 0x0c,
28804 SAVRES_GPR = 0,
28805 SAVRES_FPR = 4,
28806 SAVRES_VR = 8
28809 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
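/* Editorial examples of the SEL encoding: SAVRES_SAVE | SAVRES_FPR
   (0x6) selects an FPR save; SAVRES_GPR | SAVRES_LR (0x1) is a GPR
   restore that also handles LR; the largest value,
   SAVRES_SAVE | SAVRES_VR | SAVRES_LR == 11, is why the second index
   of savres_routine_syms runs 0..11.  */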
28811 /* Temporary holding space for an out-of-line register save/restore
28812 routine name. */
28813 static char savres_routine_name[30];
28815 /* Return the name for an out-of-line register save/restore routine.
28816 SEL encodes the register class and whether LR is also handled. */
28818 static char *
28819 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
28821 const char *prefix = "";
28822 const char *suffix = "";
28824 /* Different targets are supposed to define
28825 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
28826 routine name could be defined with:
28828 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
28830 This is a nice idea in theory, but in reality things are
28831 complicated in several ways:
28833 - ELF targets have save/restore routines for GPRs.
28835 - SPE targets use different prefixes for 32/64-bit registers, and
28836 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
28838 - PPC64 ELF targets have routines for save/restore of GPRs that
28839 differ in what they do with the link register, so having a set
28840 prefix doesn't work. (We only use one of the save routines at
28841 the moment, though.)
28843 - PPC32 elf targets have "exit" versions of the restore routines
28844 that restore the link register and can save some extra space.
28845 These require an extra suffix. (There are also "tail" versions
28846 of the restore routines and "GOT" versions of the save routines,
28847 but we don't generate those at present. Same problems apply,
28848 though.)
28850 We deal with all this by synthesizing our own prefix/suffix and
28851 using that for the simple sprintf call shown above. */
28852 if (TARGET_SPE)
28854 /* No floating point saves on the SPE. */
28855 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
28857 if ((sel & SAVRES_SAVE))
28858 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
28859 else
28860 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
28862 if ((sel & SAVRES_LR))
28863 suffix = "_x";
28865 else if (DEFAULT_ABI == ABI_V4)
28867 if (TARGET_64BIT)
28868 goto aix_names;
28870 if ((sel & SAVRES_REG) == SAVRES_GPR)
28871 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
28872 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28873 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
28874 else if ((sel & SAVRES_REG) == SAVRES_VR)
28875 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28876 else
28877 abort ();
28879 if ((sel & SAVRES_LR))
28880 suffix = "_x";
28882 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28884 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
28885 /* No out-of-line save/restore routines for GPRs on AIX. */
28886 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
28887 #endif
28889 aix_names:
28890 if ((sel & SAVRES_REG) == SAVRES_GPR)
28891 prefix = ((sel & SAVRES_SAVE)
28892 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
28893 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
28894 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28896 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
28897 if ((sel & SAVRES_LR))
28898 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
28899 else
28900 #endif
28902 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
28903 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
28906 else if ((sel & SAVRES_REG) == SAVRES_VR)
28907 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28908 else
28909 abort ();
28912 if (DEFAULT_ABI == ABI_DARWIN)
28914 /* The Darwin approach is (slightly) different, in order to be
28915 compatible with code generated by the system toolchain. There is a
28916 single symbol for the start of save sequence, and the code here
28917 embeds an offset into that code on the basis of the first register
28918 to be saved. */
28919 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
28920 if ((sel & SAVRES_REG) == SAVRES_GPR)
28921 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
28922 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
28923 (regno - 13) * 4, prefix, regno);
28924 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28925 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
28926 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
28927 else if ((sel & SAVRES_REG) == SAVRES_VR)
28928 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
28929 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
28930 else
28931 abort ();
28933 else
28934 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
28936 return savres_routine_name;
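/* Editorial examples of the synthesized names: a 64-bit ELF GPR save
   that also stores LR yields "_savegpr0_29"; a 32-bit V.4 "exit" FPR
   restore yields "_restfpr_14_x"; a Darwin VR save starting at v20
   yields "*saveVEC ; save v20-v31".  */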
28939 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
28940 SEL encodes the register class and whether LR is also handled. */
28942 static rtx
28943 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
28945 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
28946 ? info->first_gp_reg_save
28947 : (sel & SAVRES_REG) == SAVRES_FPR
28948 ? info->first_fp_reg_save - 32
28949 : (sel & SAVRES_REG) == SAVRES_VR
28950 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
28951 : -1);
28952 rtx sym;
28953 int select = sel;
28955 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
28956 versions of the gpr routines. */
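  /* Flipping the GPR selection into the (otherwise unused) FPR encoding
     gives these routines their own slot in savres_routine_syms.  */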
28957 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
28958 && info->spe_64bit_regs_used)
28959 select ^= SAVRES_FPR ^ SAVRES_GPR;
28961 /* Don't generate bogus routine names. */
28962 gcc_assert (FIRST_SAVRES_REGISTER <= regno
28963 && regno <= LAST_SAVRES_REGISTER
28964 && select >= 0 && select <= 12);
28966 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
28968 if (sym == NULL)
28970 char *name;
28972 name = rs6000_savres_routine_name (info, regno, sel);
28974 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
28975 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
28976 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
28979 return sym;
28982 /* Emit a sequence of insns, including a stack tie if needed, for
28983 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
28984 reset the stack pointer, but move the base of the frame into
28985 reg UPDT_REGNO for use by out-of-line register restore routines. */
28987 static rtx
28988 rs6000_emit_stack_reset (rs6000_stack_t *info,
28989 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
28990 unsigned updt_regno)
28992 /* If there is nothing to do, don't do anything. */
28993 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
28994 return NULL_RTX;
28996 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
28998 /* This blockage is needed so that sched doesn't decide to move
28999 the sp change before the register restores. */
29000 if (DEFAULT_ABI == ABI_V4
29001 || (TARGET_SPE_ABI
29002 && info->spe_64bit_regs_used != 0
29003 && info->first_gp_reg_save != 32))
29004 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
29005 GEN_INT (frame_off)));
29007 /* If we are restoring registers out-of-line, we will be using the
29008 "exit" variants of the restore routines, which will reset the
29009 stack for us. But we do need to point updt_reg into the
29010 right place for those routines. */
29011 if (frame_off != 0)
29012 return emit_insn (gen_add3_insn (updt_reg_rtx,
29013 frame_reg_rtx, GEN_INT (frame_off)));
29014 else
29015 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
29017 return NULL_RTX;
29020 /* Return the register number used as a pointer by out-of-line
29021 save/restore functions. */
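/* Summarizing the returns below: AIX/ELFv2 routines use r1 for FPRs or
   when LR is involved and r12 otherwise; Darwin FPR routines use r1;
   all remaining cases (Darwin GPR/VR and everything on V4) use r11.  */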
29023 static inline unsigned
29024 ptr_regno_for_savres (int sel)
29026 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29027 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
29028 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
29031 /* Construct a parallel rtx describing the effect of a call to an
29032 out-of-line register save/restore routine, and emit the insn
29033 or jump_insn as appropriate. */
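/* Roughly, for an LR-restoring GPR routine the PARALLEL built below is
   (return) (clobber lr) (use symbol) (use ptr-reg) followed by one
   (set ...) per register; the routine call itself is modelled without
   an explicit CALL rtx.  */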
29035 static rtx_insn *
29036 rs6000_emit_savres_rtx (rs6000_stack_t *info,
29037 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
29038 machine_mode reg_mode, int sel)
29040 int i;
29041 int offset, start_reg, end_reg, n_regs, use_reg;
29042 int reg_size = GET_MODE_SIZE (reg_mode);
29043 rtx sym;
29044 rtvec p;
29045 rtx par;
29046 rtx_insn *insn;
29048 offset = 0;
29049 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
29050 ? info->first_gp_reg_save
29051 : (sel & SAVRES_REG) == SAVRES_FPR
29052 ? info->first_fp_reg_save
29053 : (sel & SAVRES_REG) == SAVRES_VR
29054 ? info->first_altivec_reg_save
29055 : -1);
29056 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
29057 ? 32
29058 : (sel & SAVRES_REG) == SAVRES_FPR
29059 ? 64
29060 : (sel & SAVRES_REG) == SAVRES_VR
29061 ? LAST_ALTIVEC_REGNO + 1
29062 : -1);
29063 n_regs = end_reg - start_reg;
29064 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
29065 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
29066 + n_regs);
29068 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29069 RTVEC_ELT (p, offset++) = ret_rtx;
29071 RTVEC_ELT (p, offset++)
29072 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29074 sym = rs6000_savres_routine_sym (info, sel);
29075 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
29077 use_reg = ptr_regno_for_savres (sel);
29078 if ((sel & SAVRES_REG) == SAVRES_VR)
29080 /* Vector regs are saved/restored using [reg+reg] addressing. */
29081 RTVEC_ELT (p, offset++)
29082 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
29083 RTVEC_ELT (p, offset++)
29084 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
29086 else
29087 RTVEC_ELT (p, offset++)
29088 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
29090 for (i = 0; i < end_reg - start_reg; i++)
29091 RTVEC_ELT (p, i + offset)
29092 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
29093 frame_reg_rtx, save_area_offset + reg_size * i,
29094 (sel & SAVRES_SAVE) != 0);
29096 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29097 RTVEC_ELT (p, i + offset)
29098 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
29100 par = gen_rtx_PARALLEL (VOIDmode, p);
29102 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
29104 insn = emit_jump_insn (par);
29105 JUMP_LABEL (insn) = ret_rtx;
29107 else
29108 insn = emit_insn (par);
29109 return insn;
29112 /* Emit code to store CR fields that need to be saved into REG. */
29114 static void
29115 rs6000_emit_move_from_cr (rtx reg)
29117 /* Only the ELFv2 ABI allows storing only selected fields. */
29118 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
29120 int i, cr_reg[8], count = 0;
29122 /* Collect CR fields that must be saved. */
29123 for (i = 0; i < 8; i++)
29124 if (save_reg_p (CR0_REGNO + i))
29125 cr_reg[count++] = i;
29127 /* If it's just a single one, use mfcrf. */
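         /* The FXM mask bit for CR field N is 1 << (7 - N); e.g. saving
            only CR2 uses a mask of 0x20.  */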
29128 if (count == 1)
29130 rtvec p = rtvec_alloc (1);
29131 rtvec r = rtvec_alloc (2);
29132 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
29133 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
29134 RTVEC_ELT (p, 0)
29135 = gen_rtx_SET (reg,
29136 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
29138 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29139 return;
29142 /* ??? It might be better to handle the count == 2 or 3 cases here
29143 as well, using logical operations to combine the values. */
29146 emit_insn (gen_movesi_from_cr (reg));
29149 /* Return whether the split-stack arg pointer (r12) is used. */
29151 static bool
29152 split_stack_arg_pointer_used_p (void)
29154 /* If the pseudo holding the arg pointer is no longer a pseudo,
29155 then the arg pointer is used. */
29156 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
29157 && (!REG_P (cfun->machine->split_stack_arg_pointer)
29158 || (REGNO (cfun->machine->split_stack_arg_pointer)
29159 < FIRST_PSEUDO_REGISTER)))
29160 return true;
29162 /* Unfortunately we also need to do some code scanning, since
29163 r12 may have been substituted for the pseudo. */
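  /* Only the entry block needs scanning: a use of r12 there before any
     call or redefinition of r12 means the incoming value is live.  */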
29164 rtx_insn *insn;
29165 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
29166 FOR_BB_INSNS (bb, insn)
29167 if (NONDEBUG_INSN_P (insn))
29169 /* A call destroys r12. */
29170 if (CALL_P (insn))
29171 return false;
29173 df_ref use;
29174 FOR_EACH_INSN_USE (use, insn)
29176 rtx x = DF_REF_REG (use);
29177 if (REG_P (x) && REGNO (x) == 12)
29178 return true;
29180 df_ref def;
29181 FOR_EACH_INSN_DEF (def, insn)
29183 rtx x = DF_REF_REG (def);
29184 if (REG_P (x) && REGNO (x) == 12)
29185 return false;
29188 return bitmap_bit_p (DF_LR_OUT (bb), 12);
29191 /* Return whether we need to emit an ELFv2 global entry point prologue. */
29193 static bool
29194 rs6000_global_entry_point_needed_p (void)
29196 /* Only needed for the ELFv2 ABI. */
29197 if (DEFAULT_ABI != ABI_ELFv2)
29198 return false;
29200 /* With -msingle-pic-base, we assume the whole program shares the same
29201 TOC, so no global entry point prologues are needed anywhere. */
29202 if (TARGET_SINGLE_PIC_BASE)
29203 return false;
29205 /* Ensure we have a global entry point for thunks. ??? We could
29206 avoid that if the target routine doesn't need a global entry point,
29207 but we do not know whether this is the case at this point. */
29208 if (cfun->is_thunk)
29209 return true;
29211 /* For regular functions, rs6000_emit_prologue sets this flag if the
29212 routine ever uses the TOC pointer. */
29213 return cfun->machine->r2_setup_needed;
29216 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
29217 static sbitmap
29218 rs6000_get_separate_components (void)
29220 rs6000_stack_t *info = rs6000_stack_info ();
29222 if (WORLD_SAVE_P (info))
29223 return NULL;
29225 if (TARGET_SPE_ABI)
29226 return NULL;
29228 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
29229 && !(info->savres_strategy & REST_MULTIPLE));
29231 /* Component 0 is the save/restore of LR (done via GPR0).
29232 Components 13..31 are the save/restore of GPR13..GPR31.
29233 Components 46..63 are the save/restore of FPR14..FPR31. */
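  /* The GPR and FPR component numbers are simply the hard register
     numbers; FPR14 is hard register 32 + 14 = 46.  */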
29235 cfun->machine->n_components = 64;
29237 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
29238 bitmap_clear (components);
29240 int reg_size = TARGET_32BIT ? 4 : 8;
29241 int fp_reg_size = 8;
29243 /* The GPRs we need saved to the frame. */
29244 if ((info->savres_strategy & SAVE_INLINE_GPRS)
29245 && (info->savres_strategy & REST_INLINE_GPRS))
29247 int offset = info->gp_save_offset;
29248 if (info->push_p)
29249 offset += info->total_size;
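      /* Only slots reachable via a 16-bit signed displacement (the D
         field of the store/load insns) can be wrapped separately.  */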
29251 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
29253 if (IN_RANGE (offset, -0x8000, 0x7fff)
29254 && rs6000_reg_live_or_pic_offset_p (regno))
29255 bitmap_set_bit (components, regno);
29257 offset += reg_size;
29261 /* Don't mess with the hard frame pointer. */
29262 if (frame_pointer_needed)
29263 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
29265 /* Don't mess with the fixed TOC register. */
29266 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
29267 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
29268 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
29269 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
29271 /* The FPRs we need saved to the frame. */
29272 if ((info->savres_strategy & SAVE_INLINE_FPRS)
29273 && (info->savres_strategy & REST_INLINE_FPRS))
29275 int offset = info->fp_save_offset;
29276 if (info->push_p)
29277 offset += info->total_size;
29279 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
29281 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
29282 bitmap_set_bit (components, regno);
29284 offset += fp_reg_size;
29288 /* Optimize LR save and restore if we can. This is component 0. Any
29289 out-of-line register save/restore routines need LR. */
29290 if (info->lr_save_p
29291 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
29292 && (info->savres_strategy & SAVE_INLINE_GPRS)
29293 && (info->savres_strategy & REST_INLINE_GPRS)
29294 && (info->savres_strategy & SAVE_INLINE_FPRS)
29295 && (info->savres_strategy & REST_INLINE_FPRS)
29296 && (info->savres_strategy & SAVE_INLINE_VRS)
29297 && (info->savres_strategy & REST_INLINE_VRS))
29299 int offset = info->lr_save_offset;
29300 if (info->push_p)
29301 offset += info->total_size;
29302 if (IN_RANGE (offset, -0x8000, 0x7fff))
29303 bitmap_set_bit (components, 0);
29306 return components;
29309 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
29310 static sbitmap
29311 rs6000_components_for_bb (basic_block bb)
29313 rs6000_stack_t *info = rs6000_stack_info ();
29315 bitmap in = DF_LIVE_IN (bb);
29316 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
29317 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
29319 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
29320 bitmap_clear (components);
29322 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
29324 /* GPRs. */
29325 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
29326 if (bitmap_bit_p (in, regno)
29327 || bitmap_bit_p (gen, regno)
29328 || bitmap_bit_p (kill, regno))
29329 bitmap_set_bit (components, regno);
29331 /* FPRs. */
29332 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
29333 if (bitmap_bit_p (in, regno)
29334 || bitmap_bit_p (gen, regno)
29335 || bitmap_bit_p (kill, regno))
29336 bitmap_set_bit (components, regno);
29338 /* The link register. */
29339 if (bitmap_bit_p (in, LR_REGNO)
29340 || bitmap_bit_p (gen, LR_REGNO)
29341 || bitmap_bit_p (kill, LR_REGNO))
29342 bitmap_set_bit (components, 0);
29344 return components;
29347 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
29348 static void
29349 rs6000_disqualify_components (sbitmap components, edge e,
29350 sbitmap edge_components, bool /*is_prologue*/)
29352 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
29353 live where we want to place that code. */
29354 if (bitmap_bit_p (edge_components, 0)
29355 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
29357 if (dump_file)
29358 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
29359 "on entry to bb %d\n", e->dest->index);
29360 bitmap_clear_bit (components, 0);
29364 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
29365 static void
29366 rs6000_emit_prologue_components (sbitmap components)
29368 rs6000_stack_t *info = rs6000_stack_info ();
29369 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
29370 ? HARD_FRAME_POINTER_REGNUM
29371 : STACK_POINTER_REGNUM);
29373 machine_mode reg_mode = Pmode;
29374 int reg_size = TARGET_32BIT ? 4 : 8;
29375 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29376 ? DFmode : SFmode;
29377 int fp_reg_size = 8;
29379 /* Prologue for LR. */
29380 if (bitmap_bit_p (components, 0))
29382 rtx reg = gen_rtx_REG (reg_mode, 0);
29383 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
29384 RTX_FRAME_RELATED_P (insn) = 1;
29385 add_reg_note (insn, REG_CFA_REGISTER, NULL);
29387 int offset = info->lr_save_offset;
29388 if (info->push_p)
29389 offset += info->total_size;
29391 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29392 RTX_FRAME_RELATED_P (insn) = 1;
29393 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
29394 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
29395 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
29398 /* Prologue for the GPRs. */
29399 int offset = info->gp_save_offset;
29400 if (info->push_p)
29401 offset += info->total_size;
29403 for (int i = info->first_gp_reg_save; i < 32; i++)
29405 if (bitmap_bit_p (components, i))
29407 rtx reg = gen_rtx_REG (reg_mode, i);
29408 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29409 RTX_FRAME_RELATED_P (insn) = 1;
29410 rtx set = copy_rtx (single_set (insn));
29411 add_reg_note (insn, REG_CFA_OFFSET, set);
29414 offset += reg_size;
29417 /* Prologue for the FPRs. */
29418 offset = info->fp_save_offset;
29419 if (info->push_p)
29420 offset += info->total_size;
29422 for (int i = info->first_fp_reg_save; i < 64; i++)
29424 if (bitmap_bit_p (components, i))
29426 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29427 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
29428 RTX_FRAME_RELATED_P (insn) = 1;
29429 rtx set = copy_rtx (single_set (insn));
29430 add_reg_note (insn, REG_CFA_OFFSET, set);
29433 offset += fp_reg_size;
29437 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
29438 static void
29439 rs6000_emit_epilogue_components (sbitmap components)
29441 rs6000_stack_t *info = rs6000_stack_info ();
29442 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
29443 ? HARD_FRAME_POINTER_REGNUM
29444 : STACK_POINTER_REGNUM);
29446 machine_mode reg_mode = Pmode;
29447 int reg_size = TARGET_32BIT ? 4 : 8;
29449 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29450 ? DFmode : SFmode;
29451 int fp_reg_size = 8;
29453 /* Epilogue for the FPRs. */
29454 int offset = info->fp_save_offset;
29455 if (info->push_p)
29456 offset += info->total_size;
29458 for (int i = info->first_fp_reg_save; i < 64; i++)
29460 if (bitmap_bit_p (components, i))
29462 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29463 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29464 RTX_FRAME_RELATED_P (insn) = 1;
29465 add_reg_note (insn, REG_CFA_RESTORE, reg);
29468 offset += fp_reg_size;
29471 /* Epilogue for the GPRs. */
29472 offset = info->gp_save_offset;
29473 if (info->push_p)
29474 offset += info->total_size;
29476 for (int i = info->first_gp_reg_save; i < 32; i++)
29478 if (bitmap_bit_p (components, i))
29480 rtx reg = gen_rtx_REG (reg_mode, i);
29481 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29482 RTX_FRAME_RELATED_P (insn) = 1;
29483 add_reg_note (insn, REG_CFA_RESTORE, reg);
29486 offset += reg_size;
29489 /* Epilogue for LR. */
29490 if (bitmap_bit_p (components, 0))
29492 int offset = info->lr_save_offset;
29493 if (info->push_p)
29494 offset += info->total_size;
29496 rtx reg = gen_rtx_REG (reg_mode, 0);
29497 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
29499 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
29500 insn = emit_move_insn (lr, reg);
29501 RTX_FRAME_RELATED_P (insn) = 1;
29502 add_reg_note (insn, REG_CFA_RESTORE, lr);
29506 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
29507 static void
29508 rs6000_set_handled_components (sbitmap components)
29510 rs6000_stack_t *info = rs6000_stack_info ();
29512 for (int i = info->first_gp_reg_save; i < 32; i++)
29513 if (bitmap_bit_p (components, i))
29514 cfun->machine->gpr_is_wrapped_separately[i] = true;
29516 for (int i = info->first_fp_reg_save; i < 64; i++)
29517 if (bitmap_bit_p (components, i))
29518 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
29520 if (bitmap_bit_p (components, 0))
29521 cfun->machine->lr_is_wrapped_separately = true;
29524 /* Emit function prologue as insns. */
29526 void
29527 rs6000_emit_prologue (void)
29529 rs6000_stack_t *info = rs6000_stack_info ();
29530 machine_mode reg_mode = Pmode;
29531 int reg_size = TARGET_32BIT ? 4 : 8;
29532 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
29533 ? DFmode : SFmode;
29534 int fp_reg_size = 8;
29535 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29536 rtx frame_reg_rtx = sp_reg_rtx;
29537 unsigned int cr_save_regno;
29538 rtx cr_save_rtx = NULL_RTX;
29539 rtx_insn *insn;
29540 int strategy;
29541 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
29542 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
29543 && call_used_regs[STATIC_CHAIN_REGNUM]);
29544 int using_split_stack = (flag_split_stack
29545 && (lookup_attribute ("no_split_stack",
29546 DECL_ATTRIBUTES (cfun->decl))
29547 == NULL));
29549 /* Offset to top of frame for frame_reg and sp respectively. */
29550 HOST_WIDE_INT frame_off = 0;
29551 HOST_WIDE_INT sp_off = 0;
29552 /* sp_adjust is the stack adjusting instruction, tracked so that the
29553 insn setting up the split-stack arg pointer can be emitted just
29554 prior to it, when r12 is not used here for other purposes. */
29555 rtx_insn *sp_adjust = 0;
29557 #if CHECKING_P
29558 /* Track and check usage of r0, r11, r12. */
29559 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
29560 #define START_USE(R) do \
29562 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
29563 reg_inuse |= 1 << (R); \
29564 } while (0)
29565 #define END_USE(R) do \
29567 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
29568 reg_inuse &= ~(1 << (R)); \
29569 } while (0)
29570 #define NOT_INUSE(R) do \
29572 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
29573 } while (0)
29574 #else
29575 #define START_USE(R) do {} while (0)
29576 #define END_USE(R) do {} while (0)
29577 #define NOT_INUSE(R) do {} while (0)
29578 #endif
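/* Typical usage in the code below: START_USE (0) before emitting insns
   that live in r0, END_USE (0) once r0 is dead again; NOT_INUSE (11)
   merely asserts that r11 is currently free.  */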
29580 if (DEFAULT_ABI == ABI_ELFv2
29581 && !TARGET_SINGLE_PIC_BASE)
29583 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
29585 /* With -mminimal-toc we may generate an extra use of r2 below. */
29586 if (TARGET_TOC && TARGET_MINIMAL_TOC
29587 && !constant_pool_empty_p ())
29588 cfun->machine->r2_setup_needed = true;
29592 if (flag_stack_usage_info)
29593 current_function_static_stack_size = info->total_size;
29595 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
29597 HOST_WIDE_INT size = info->total_size;
29599 if (crtl->is_leaf && !cfun->calls_alloca)
29601 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
29602 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
29603 size - STACK_CHECK_PROTECT);
29605 else if (size > 0)
29606 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
29609 if (TARGET_FIX_AND_CONTINUE)
29611 /* gdb on darwin arranges to forward a function from the old
29612 address by modifying the first 5 instructions of the function
29613 to branch to the overriding function. This is necessary to
29614 permit function pointers that point to the old function to
29615 actually forward to the new function. */
29616 emit_insn (gen_nop ());
29617 emit_insn (gen_nop ());
29618 emit_insn (gen_nop ());
29619 emit_insn (gen_nop ());
29620 emit_insn (gen_nop ());
29623 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
29625 reg_mode = V2SImode;
29626 reg_size = 8;
29629 /* Handle world saves specially here. */
29630 if (WORLD_SAVE_P (info))
29632 int i, j, sz;
29633 rtx treg;
29634 rtvec p;
29635 rtx reg0;
29637 /* save_world expects lr in r0. */
29638 reg0 = gen_rtx_REG (Pmode, 0);
29639 if (info->lr_save_p)
29641 insn = emit_move_insn (reg0,
29642 gen_rtx_REG (Pmode, LR_REGNO));
29643 RTX_FRAME_RELATED_P (insn) = 1;
29646 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
29647 assumptions about the offsets of various bits of the stack
29648 frame. */
29649 gcc_assert (info->gp_save_offset == -220
29650 && info->fp_save_offset == -144
29651 && info->lr_save_offset == 8
29652 && info->cr_save_offset == 4
29653 && info->push_p
29654 && info->lr_save_p
29655 && (!crtl->calls_eh_return
29656 || info->ehrd_offset == -432)
29657 && info->vrsave_save_offset == -224
29658 && info->altivec_save_offset == -416);
29660 treg = gen_rtx_REG (SImode, 11);
29661 emit_move_insn (treg, GEN_INT (-info->total_size));
29663 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
29664 in R11. It also clobbers R12, so beware! */
29666 /* Preserve CR2 for save_world prologues */
29667 sz = 5;
29668 sz += 32 - info->first_gp_reg_save;
29669 sz += 64 - info->first_fp_reg_save;
29670 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
29671 p = rtvec_alloc (sz);
29672 j = 0;
29673 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
29674 gen_rtx_REG (SImode,
29675 LR_REGNO));
29676 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
29677 gen_rtx_SYMBOL_REF (Pmode,
29678 "*save_world"));
29679 /* We do floats first so that the instruction pattern matches
29680 properly. */
29681 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29682 RTVEC_ELT (p, j++)
29683 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
29684 ? DFmode : SFmode,
29685 info->first_fp_reg_save + i),
29686 frame_reg_rtx,
29687 info->fp_save_offset + frame_off + 8 * i);
29688 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
29689 RTVEC_ELT (p, j++)
29690 = gen_frame_store (gen_rtx_REG (V4SImode,
29691 info->first_altivec_reg_save + i),
29692 frame_reg_rtx,
29693 info->altivec_save_offset + frame_off + 16 * i);
29694 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29695 RTVEC_ELT (p, j++)
29696 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
29697 frame_reg_rtx,
29698 info->gp_save_offset + frame_off + reg_size * i);
29700 /* CR register traditionally saved as CR2. */
29701 RTVEC_ELT (p, j++)
29702 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
29703 frame_reg_rtx, info->cr_save_offset + frame_off);
29704 /* Explain about use of R0. */
29705 if (info->lr_save_p)
29706 RTVEC_ELT (p, j++)
29707 = gen_frame_store (reg0,
29708 frame_reg_rtx, info->lr_save_offset + frame_off);
29709 /* Explain what happens to the stack pointer. */
29711 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
29712 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
29715 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29716 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29717 treg, GEN_INT (-info->total_size));
29718 sp_off = frame_off = info->total_size;
29721 strategy = info->savres_strategy;
29723 /* For V.4, update stack before we do any saving and set back pointer. */
29724 if (! WORLD_SAVE_P (info)
29725 && info->push_p
29726 && (DEFAULT_ABI == ABI_V4
29727 || crtl->calls_eh_return))
29729 bool need_r11 = (TARGET_SPE
29730 ? (!(strategy & SAVE_INLINE_GPRS)
29731 && info->spe_64bit_regs_used == 0)
29732 : (!(strategy & SAVE_INLINE_FPRS)
29733 || !(strategy & SAVE_INLINE_GPRS)
29734 || !(strategy & SAVE_INLINE_VRS)));
29735 int ptr_regno = -1;
29736 rtx ptr_reg = NULL_RTX;
29737 int ptr_off = 0;
29739 if (info->total_size < 32767)
29740 frame_off = info->total_size;
29741 else if (need_r11)
29742 ptr_regno = 11;
29743 else if (info->cr_save_p
29744 || info->lr_save_p
29745 || info->first_fp_reg_save < 64
29746 || info->first_gp_reg_save < 32
29747 || info->altivec_size != 0
29748 || info->vrsave_size != 0
29749 || crtl->calls_eh_return)
29750 ptr_regno = 12;
29751 else
29753 /* The prologue won't be saving any regs so there is no need
29754 to set up a frame register to access any frame save area.
29755 We also won't be using frame_off anywhere below, but set
29756 the correct value anyway to protect against future
29757 changes to this function. */
29758 frame_off = info->total_size;
29760 if (ptr_regno != -1)
29762 /* Set up the frame offset to that needed by the first
29763 out-of-line save function. */
29764 START_USE (ptr_regno);
29765 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29766 frame_reg_rtx = ptr_reg;
29767 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
29768 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
29769 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
29770 ptr_off = info->gp_save_offset + info->gp_size;
29771 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
29772 ptr_off = info->altivec_save_offset + info->altivec_size;
29773 frame_off = -ptr_off;
29775 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
29776 ptr_reg, ptr_off);
29777 if (REGNO (frame_reg_rtx) == 12)
29778 sp_adjust = 0;
29779 sp_off = info->total_size;
29780 if (frame_reg_rtx != sp_reg_rtx)
29781 rs6000_emit_stack_tie (frame_reg_rtx, false);
29784 /* If we use the link register, get it into r0. */
29785 if (!WORLD_SAVE_P (info) && info->lr_save_p
29786 && !cfun->machine->lr_is_wrapped_separately)
29788 rtx addr, reg, mem;
29790 reg = gen_rtx_REG (Pmode, 0);
29791 START_USE (0);
29792 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
29793 RTX_FRAME_RELATED_P (insn) = 1;
29795 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
29796 | SAVE_NOINLINE_FPRS_SAVES_LR)))
29798 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
29799 GEN_INT (info->lr_save_offset + frame_off));
29800 mem = gen_rtx_MEM (Pmode, addr);
29801 /* This should not be in rs6000_sr_alias_set, because of
29802 __builtin_return_address. */
29804 insn = emit_move_insn (mem, reg);
29805 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29806 NULL_RTX, NULL_RTX);
29807 END_USE (0);
29811 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
29812 r12 will be needed by out-of-line gpr save. */
29813 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29814 && !(strategy & (SAVE_INLINE_GPRS
29815 | SAVE_NOINLINE_GPRS_SAVES_LR))
29816 ? 11 : 12);
29817 if (!WORLD_SAVE_P (info)
29818 && info->cr_save_p
29819 && REGNO (frame_reg_rtx) != cr_save_regno
29820 && !(using_static_chain_p && cr_save_regno == 11)
29821 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
29823 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
29824 START_USE (cr_save_regno);
29825 rs6000_emit_move_from_cr (cr_save_rtx);
29828 /* Do any required saving of fpr's. If only one or two to save, do
29829 it ourselves. Otherwise, call function. */
29830 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
29832 int offset = info->fp_save_offset + frame_off;
29833 for (int i = info->first_fp_reg_save; i < 64; i++)
29835 if (save_reg_p (i)
29836 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
29837 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
29838 sp_off - frame_off);
29840 offset += fp_reg_size;
29843 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
29845 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
29846 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
29847 unsigned ptr_regno = ptr_regno_for_savres (sel);
29848 rtx ptr_reg = frame_reg_rtx;
29850 if (REGNO (frame_reg_rtx) == ptr_regno)
29851 gcc_checking_assert (frame_off == 0);
29852 else
29854 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29855 NOT_INUSE (ptr_regno);
29856 emit_insn (gen_add3_insn (ptr_reg,
29857 frame_reg_rtx, GEN_INT (frame_off)));
29859 insn = rs6000_emit_savres_rtx (info, ptr_reg,
29860 info->fp_save_offset,
29861 info->lr_save_offset,
29862 DFmode, sel);
29863 rs6000_frame_related (insn, ptr_reg, sp_off,
29864 NULL_RTX, NULL_RTX);
29865 if (lr)
29866 END_USE (0);
29869 /* Save GPRs. This is done as a PARALLEL if we are using
29870 the store-multiple instructions. */
29871 if (!WORLD_SAVE_P (info)
29872 && TARGET_SPE_ABI
29873 && info->spe_64bit_regs_used != 0
29874 && info->first_gp_reg_save != 32)
29876 int i;
29877 rtx spe_save_area_ptr;
29878 HOST_WIDE_INT save_off;
29879 int ool_adjust = 0;
29881 /* Determine whether we can address all of the registers that need
29882 to be saved with an offset from frame_reg_rtx that fits in
29883 the small const field for SPE memory instructions. */
29884 int spe_regs_addressable
29885 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
29886 + reg_size * (32 - info->first_gp_reg_save - 1))
29887 && (strategy & SAVE_INLINE_GPRS));
29889 if (spe_regs_addressable)
29891 spe_save_area_ptr = frame_reg_rtx;
29892 save_off = frame_off;
29894 else
29896 /* Make r11 point to the start of the SPE save area. We need
29897 to be careful here if r11 is holding the static chain. If
29898 it is, then temporarily save it in r0. */
29899 HOST_WIDE_INT offset;
29901 if (!(strategy & SAVE_INLINE_GPRS))
29902 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
29903 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
29904 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
29905 save_off = frame_off - offset;
29907 if (using_static_chain_p)
29909 rtx r0 = gen_rtx_REG (Pmode, 0);
29911 START_USE (0);
29912 gcc_assert (info->first_gp_reg_save > 11);
29914 emit_move_insn (r0, spe_save_area_ptr);
29916 else if (REGNO (frame_reg_rtx) != 11)
29917 START_USE (11);
29919 emit_insn (gen_addsi3 (spe_save_area_ptr,
29920 frame_reg_rtx, GEN_INT (offset)));
29921 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
29922 frame_off = -info->spe_gp_save_offset + ool_adjust;
29925 if ((strategy & SAVE_INLINE_GPRS))
29927 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29928 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
29929 emit_frame_save (spe_save_area_ptr, reg_mode,
29930 info->first_gp_reg_save + i,
29931 (info->spe_gp_save_offset + save_off
29932 + reg_size * i),
29933 sp_off - save_off);
29935 else
29937 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
29938 info->spe_gp_save_offset + save_off,
29939 0, reg_mode,
29940 SAVRES_SAVE | SAVRES_GPR);
29942 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
29943 NULL_RTX, NULL_RTX);
29946 /* Move the static chain pointer back. */
29947 if (!spe_regs_addressable)
29949 if (using_static_chain_p)
29951 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
29952 END_USE (0);
29954 else if (REGNO (frame_reg_rtx) != 11)
29955 END_USE (11);
29958 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
29960 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
29961 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
29962 unsigned ptr_regno = ptr_regno_for_savres (sel);
29963 rtx ptr_reg = frame_reg_rtx;
29964 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
29965 int end_save = info->gp_save_offset + info->gp_size;
29966 int ptr_off;
29968 if (ptr_regno == 12)
29969 sp_adjust = 0;
29970 if (!ptr_set_up)
29971 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29973 /* Need to adjust r11 (r12) if we saved any FPRs. */
29974 if (end_save + frame_off != 0)
29976 rtx offset = GEN_INT (end_save + frame_off);
29978 if (ptr_set_up)
29979 frame_off = -end_save;
29980 else
29981 NOT_INUSE (ptr_regno);
29982 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
29984 else if (!ptr_set_up)
29986 NOT_INUSE (ptr_regno);
29987 emit_move_insn (ptr_reg, frame_reg_rtx);
29989 ptr_off = -end_save;
29990 insn = rs6000_emit_savres_rtx (info, ptr_reg,
29991 info->gp_save_offset + ptr_off,
29992 info->lr_save_offset + ptr_off,
29993 reg_mode, sel);
29994 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
29995 NULL_RTX, NULL_RTX);
29996 if (lr)
29997 END_USE (0);
29999 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
30001 rtvec p;
30002 int i;
30003 p = rtvec_alloc (32 - info->first_gp_reg_save);
30004 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
30005 RTVEC_ELT (p, i)
30006 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
30007 frame_reg_rtx,
30008 info->gp_save_offset + frame_off + reg_size * i);
30009 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30010 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
30011 NULL_RTX, NULL_RTX);
30013 else if (!WORLD_SAVE_P (info))
30015 int offset = info->gp_save_offset + frame_off;
30016 for (int i = info->first_gp_reg_save; i < 32; i++)
30018 if (rs6000_reg_live_or_pic_offset_p (i)
30019 && !cfun->machine->gpr_is_wrapped_separately[i])
30020 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
30021 sp_off - frame_off);
30023 offset += reg_size;
30027 if (crtl->calls_eh_return)
30029 unsigned int i;
30030 rtvec p;
30032 for (i = 0; ; ++i)
30034 unsigned int regno = EH_RETURN_DATA_REGNO (i);
30035 if (regno == INVALID_REGNUM)
30036 break;
30039 p = rtvec_alloc (i);
30041 for (i = 0; ; ++i)
30043 unsigned int regno = EH_RETURN_DATA_REGNO (i);
30044 if (regno == INVALID_REGNUM)
30045 break;
30047 rtx set
30048 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
30049 sp_reg_rtx,
30050 info->ehrd_offset + sp_off + reg_size * (int) i);
30051 RTVEC_ELT (p, i) = set;
30052 RTX_FRAME_RELATED_P (set) = 1;
30055 insn = emit_insn (gen_blockage ());
30056 RTX_FRAME_RELATED_P (insn) = 1;
30057 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
30060 /* In AIX ABI we need to make sure r2 is really saved. */
30061 if (TARGET_AIX && crtl->calls_eh_return)
30063 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
30064 rtx join_insn, note;
30065 rtx_insn *save_insn;
30066 long toc_restore_insn;
30068 tmp_reg = gen_rtx_REG (Pmode, 11);
30069 tmp_reg_si = gen_rtx_REG (SImode, 11);
30070 if (using_static_chain_p)
30072 START_USE (0);
30073 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
30075 else
30076 START_USE (11);
30077 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
30078 /* Peek at instruction to which this function returns. If it's
30079 restoring r2, then we know we've already saved r2. We can't
30080 unconditionally save r2 because the value we have will already
30081 be updated if we arrived at this function via a plt call or
30082 toc adjusting stub. */
30083 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
30084 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
30085 + RS6000_TOC_SAVE_SLOT);
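      /* 0x80410000 encodes "lwz r2,0(r1)" and 0xE8410000 encodes
         "ld r2,0(r1)"; adding RS6000_TOC_SAVE_SLOT fills in the
         displacement field.  */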
30086 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
30087 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
30088 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
30089 validate_condition_mode (EQ, CCUNSmode);
30090 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
30091 emit_insn (gen_rtx_SET (compare_result,
30092 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
30093 toc_save_done = gen_label_rtx ();
30094 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
30095 gen_rtx_EQ (VOIDmode, compare_result,
30096 const0_rtx),
30097 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
30098 pc_rtx);
30099 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
30100 JUMP_LABEL (jump) = toc_save_done;
30101 LABEL_NUSES (toc_save_done) += 1;
30103 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
30104 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
30105 sp_off - frame_off);
30107 emit_label (toc_save_done);
30109 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
30110 have a CFG that has different saves along different paths.
30111 Move the note to a dummy blockage insn, which describes that
30112 R2 is unconditionally saved after the label. */
30113 /* ??? An alternate representation might be a special insn pattern
30114 containing both the branch and the store. That might give the
30115 code that minimizes the number of DW_CFA_advance opcodes more
30116 freedom in placing the annotations. */
30117 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
30118 if (note)
30119 remove_note (save_insn, note);
30120 else
30121 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
30122 copy_rtx (PATTERN (save_insn)), NULL_RTX);
30123 RTX_FRAME_RELATED_P (save_insn) = 0;
30125 join_insn = emit_insn (gen_blockage ());
30126 REG_NOTES (join_insn) = note;
30127 RTX_FRAME_RELATED_P (join_insn) = 1;
30129 if (using_static_chain_p)
30131 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
30132 END_USE (0);
30134 else
30135 END_USE (11);
30138 /* Save CR if we use any that must be preserved. */
30139 if (!WORLD_SAVE_P (info) && info->cr_save_p)
30141 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
30142 GEN_INT (info->cr_save_offset + frame_off));
30143 rtx mem = gen_frame_mem (SImode, addr);
30145 /* If we didn't copy cr before, do so now using r0. */
30146 if (cr_save_rtx == NULL_RTX)
30148 START_USE (0);
30149 cr_save_rtx = gen_rtx_REG (SImode, 0);
30150 rs6000_emit_move_from_cr (cr_save_rtx);
30153 /* Saving CR requires a two-instruction sequence: one instruction
30154 to move the CR to a general-purpose register, and a second
30155 instruction that stores the GPR to memory.
30157 We do not emit any DWARF CFI records for the first of these,
30158 because we cannot properly represent the fact that CR is saved in
30159 a register. One reason is that we cannot express that multiple
30160 CR fields are saved; another reason is that on 64-bit, the size
30161 of the CR register in DWARF (4 bytes) differs from the size of
30162 a general-purpose register.
30164 This means if any intervening instruction were to clobber one of
30165 the call-saved CR fields, we'd have incorrect CFI. To prevent
30166 this from happening, we mark the store to memory as a use of
30167 those CR fields, which prevents any such instruction from being
30168 scheduled in between the two instructions. */
30169 rtx crsave_v[9];
30170 int n_crsave = 0;
30171 int i;
30173 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
30174 for (i = 0; i < 8; i++)
30175 if (save_reg_p (CR0_REGNO + i))
30176 crsave_v[n_crsave++]
30177 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
30179 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
30180 gen_rtvec_v (n_crsave, crsave_v)));
30181 END_USE (REGNO (cr_save_rtx));
30183 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
30184 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
30185 so we need to construct a frame expression manually. */
30186 RTX_FRAME_RELATED_P (insn) = 1;
30188 /* Update address to be stack-pointer relative, like
30189 rs6000_frame_related would do. */
30190 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
30191 GEN_INT (info->cr_save_offset + sp_off));
30192 mem = gen_frame_mem (SImode, addr);
30194 if (DEFAULT_ABI == ABI_ELFv2)
30196 /* In the ELFv2 ABI we generate separate CFI records for each
30197 CR field that was actually saved. They all point to the
30198 same 32-bit stack slot. */
30199 rtx crframe[8];
30200 int n_crframe = 0;
30202 for (i = 0; i < 8; i++)
30203 if (save_reg_p (CR0_REGNO + i))
30205 crframe[n_crframe]
30206 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
30208 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
30209 n_crframe++;
30212 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
30213 gen_rtx_PARALLEL (VOIDmode,
30214 gen_rtvec_v (n_crframe, crframe)));
30216 else
30218 /* In other ABIs, by convention, we use a single CR regnum to
30219 represent the fact that all call-saved CR fields are saved.
30220 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
30221 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
30222 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
30226 /* In the ELFv2 ABI we need to save all call-saved CR fields into
30227 *separate* slots if the routine calls __builtin_eh_return, so
30228 that they can be independently restored by the unwinder. */
30229 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
30231 int i, cr_off = info->ehcr_offset;
30232 rtx crsave;
30234 /* ??? We might get better performance by using multiple mfocrf
30235 instructions. */
30236 crsave = gen_rtx_REG (SImode, 0);
30237 emit_insn (gen_movesi_from_cr (crsave));
30239 for (i = 0; i < 8; i++)
30240 if (!call_used_regs[CR0_REGNO + i])
30242 rtvec p = rtvec_alloc (2);
30243 RTVEC_ELT (p, 0)
30244 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
30245 RTVEC_ELT (p, 1)
30246 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
30248 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30250 RTX_FRAME_RELATED_P (insn) = 1;
30251 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
30252 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
30253 sp_reg_rtx, cr_off + sp_off));
30255 cr_off += reg_size;
30259 /* Update stack and set back pointer unless this is V.4,
30260 for which it was done previously. */
30261 if (!WORLD_SAVE_P (info) && info->push_p
30262 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
30264 rtx ptr_reg = NULL;
30265 int ptr_off = 0;
30267 /* If saving altivec regs we need to be able to address all save
30268 locations using a 16-bit offset. */
30269 if ((strategy & SAVE_INLINE_VRS) == 0
30270 || (info->altivec_size != 0
30271 && (info->altivec_save_offset + info->altivec_size - 16
30272 + info->total_size - frame_off) > 32767)
30273 || (info->vrsave_size != 0
30274 && (info->vrsave_save_offset
30275 + info->total_size - frame_off) > 32767))
30277 int sel = SAVRES_SAVE | SAVRES_VR;
30278 unsigned ptr_regno = ptr_regno_for_savres (sel);
30280 if (using_static_chain_p
30281 && ptr_regno == STATIC_CHAIN_REGNUM)
30282 ptr_regno = 12;
30283 if (REGNO (frame_reg_rtx) != ptr_regno)
30284 START_USE (ptr_regno);
30285 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
30286 frame_reg_rtx = ptr_reg;
30287 ptr_off = info->altivec_save_offset + info->altivec_size;
30288 frame_off = -ptr_off;
30290 else if (REGNO (frame_reg_rtx) == 1)
30291 frame_off = info->total_size;
30292 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
30293 ptr_reg, ptr_off);
30294 if (REGNO (frame_reg_rtx) == 12)
30295 sp_adjust = 0;
30296 sp_off = info->total_size;
30297 if (frame_reg_rtx != sp_reg_rtx)
30298 rs6000_emit_stack_tie (frame_reg_rtx, false);
30301 /* Set frame pointer, if needed. */
30302 if (frame_pointer_needed)
30304 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
30305 sp_reg_rtx);
30306 RTX_FRAME_RELATED_P (insn) = 1;
30309 /* Save AltiVec registers if needed. Save here because the red zone does
30310 not always include AltiVec registers. */
30311 if (!WORLD_SAVE_P (info)
30312 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
30314 int end_save = info->altivec_save_offset + info->altivec_size;
30315 int ptr_off;
30316 /* Oddly, the vector save/restore functions point r0 at the end
30317 of the save area, then use r11 or r12 to load offsets for
30318 [reg+reg] addressing. */
30319 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
30320 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
30321 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
30323 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
30324 NOT_INUSE (0);
30325 if (scratch_regno == 12)
30326 sp_adjust = 0;
30327 if (end_save + frame_off != 0)
30329 rtx offset = GEN_INT (end_save + frame_off);
30331 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
30333 else
30334 emit_move_insn (ptr_reg, frame_reg_rtx);
30336 ptr_off = -end_save;
30337 insn = rs6000_emit_savres_rtx (info, scratch_reg,
30338 info->altivec_save_offset + ptr_off,
30339 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
30340 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
30341 NULL_RTX, NULL_RTX);
30342 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
30344 /* The oddity mentioned above clobbered our frame reg. */
30345 emit_move_insn (frame_reg_rtx, ptr_reg);
30346 frame_off = ptr_off;
30349 else if (!WORLD_SAVE_P (info)
30350 && info->altivec_size != 0)
30352 int i;
30354 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30355 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
30357 rtx areg, savereg, mem;
30358 HOST_WIDE_INT offset;
30360 offset = (info->altivec_save_offset + frame_off
30361 + 16 * (i - info->first_altivec_reg_save));
30363 savereg = gen_rtx_REG (V4SImode, i);
30365 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
30367 mem = gen_frame_mem (V4SImode,
30368 gen_rtx_PLUS (Pmode, frame_reg_rtx,
30369 GEN_INT (offset)));
30370 insn = emit_insn (gen_rtx_SET (mem, savereg));
30371 areg = NULL_RTX;
30373 else
30375 NOT_INUSE (0);
30376 areg = gen_rtx_REG (Pmode, 0);
30377 emit_move_insn (areg, GEN_INT (offset));
30379 /* AltiVec addressing mode is [reg+reg]. */
30380 mem = gen_frame_mem (V4SImode,
30381 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
30383 /* Rather than emitting a generic move, force use of the stvx
30384 instruction, which we always want on ISA 2.07 (power8) systems.
30385 In particular we don't want xxpermdi/stxvd2x for little
30386 endian. */
30387 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
30390 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
30391 areg, GEN_INT (offset));
30395 /* VRSAVE is a bit vector representing which AltiVec registers
30396 are used. The OS uses this to determine which vector
30397 registers to save on a context switch. We need to save
30398 VRSAVE on the stack frame, add whatever AltiVec registers we
30399 used in this function, and do the corresponding magic in the
30400 epilogue. */
30402 if (!WORLD_SAVE_P (info)
30403 && info->vrsave_size != 0)
30405 rtx reg, vrsave;
30406 int offset;
30407 int save_regno;
30409 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
30410 be using r12 as frame_reg_rtx and r11 as the static chain
30411 pointer for nested functions. */
30412 save_regno = 12;
30413 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30414 && !using_static_chain_p)
30415 save_regno = 11;
30416 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
30418 save_regno = 11;
30419 if (using_static_chain_p)
30420 save_regno = 0;
30423 NOT_INUSE (save_regno);
30424 reg = gen_rtx_REG (SImode, save_regno);
30425 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
30426 if (TARGET_MACHO)
30427 emit_insn (gen_get_vrsave_internal (reg));
30428 else
30429 emit_insn (gen_rtx_SET (reg, vrsave));
30431 /* Save VRSAVE. */
30432 offset = info->vrsave_save_offset + frame_off;
30433 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
30435 /* Include the registers in the mask. */
30436 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
30438 insn = emit_insn (generate_set_vrsave (reg, info, 0));
30441 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
30442 if (!TARGET_SINGLE_PIC_BASE
30443 && ((TARGET_TOC && TARGET_MINIMAL_TOC
30444 && !constant_pool_empty_p ())
30445 || (DEFAULT_ABI == ABI_V4
30446 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
30447 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
30449 /* If emit_load_toc_table will use the link register, we need to save
30450 it. We use R12 for this purpose because emit_load_toc_table
30451 can use register 0. This allows us to use a plain 'blr' to return
30452 from the procedure more often. */
30453 int save_LR_around_toc_setup = (TARGET_ELF
30454 && DEFAULT_ABI == ABI_V4
30455 && flag_pic
30456 && ! info->lr_save_p
30457 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
30458 if (save_LR_around_toc_setup)
30460 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30461 rtx tmp = gen_rtx_REG (Pmode, 12);
30463 sp_adjust = 0;
30464 insn = emit_move_insn (tmp, lr);
30465 RTX_FRAME_RELATED_P (insn) = 1;
30467 rs6000_emit_load_toc_table (TRUE);
30469 insn = emit_move_insn (lr, tmp);
30470 add_reg_note (insn, REG_CFA_RESTORE, lr);
30471 RTX_FRAME_RELATED_P (insn) = 1;
30473 else
30474 rs6000_emit_load_toc_table (TRUE);
30477 #if TARGET_MACHO
30478 if (!TARGET_SINGLE_PIC_BASE
30479 && DEFAULT_ABI == ABI_DARWIN
30480 && flag_pic && crtl->uses_pic_offset_table)
30482 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30483 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
30485 /* Save and restore LR locally around this call (in R0). */
30486 if (!info->lr_save_p)
30487 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
30489 emit_insn (gen_load_macho_picbase (src));
30491 emit_move_insn (gen_rtx_REG (Pmode,
30492 RS6000_PIC_OFFSET_TABLE_REGNUM),
30493 lr);
30495 if (!info->lr_save_p)
30496 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
30498 #endif
30500 /* If we need to, save the TOC register after doing the stack setup.
30501 Do not emit eh frame info for this save. The unwinder wants info,
30502 conceptually attached to instructions in this function, about
30503 register values in the caller of this function. This R2 may have
30504 already been changed from the value in the caller.
30505 We don't attempt to write accurate DWARF EH frame info for R2
30506 because code emitted by gcc for a (non-pointer) function call
30507 doesn't save and restore R2. Instead, R2 is managed out-of-line
30508 by a linker generated plt call stub when the function resides in
30509 a shared library. This behavior is costly to describe in DWARF,
30510 both in terms of the size of DWARF info and the time taken in the
30511 unwinder to interpret it. R2 changes, apart from the
30512 calls_eh_return case earlier in this function, are handled by
30513 linux-unwind.h frob_update_context. */
30514 if (rs6000_save_toc_in_prologue_p ())
30516 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
30517 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
30520 if (using_split_stack && split_stack_arg_pointer_used_p ())
30522 /* Set up the arg pointer (r12) for -fsplit-stack code. If
30523 __morestack was called, it left the arg pointer to the old
30524 stack in r29. Otherwise, the arg pointer is the top of the
30525 current frame. */
30526 cfun->machine->split_stack_argp_used = true;
30527 if (sp_adjust)
30529 rtx r12 = gen_rtx_REG (Pmode, 12);
30530 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
30531 emit_insn_before (set_r12, sp_adjust);
30533 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
30535 rtx r12 = gen_rtx_REG (Pmode, 12);
30536 if (frame_off == 0)
30537 emit_move_insn (r12, frame_reg_rtx);
30538 else
30539 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
30541 if (info->push_p)
30543 rtx r12 = gen_rtx_REG (Pmode, 12);
30544 rtx r29 = gen_rtx_REG (Pmode, 29);
30545 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
30546 rtx not_more = gen_label_rtx ();
30547 rtx jump;
30549 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
30550 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
30551 gen_rtx_LABEL_REF (VOIDmode, not_more),
30552 pc_rtx);
30553 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
30554 JUMP_LABEL (jump) = not_more;
30555 LABEL_NUSES (not_more) += 1;
30556 emit_move_insn (r12, r29);
30557 emit_label (not_more);
30562 /* Output .extern statements for the save/restore routines we use. */
30564 static void
30565 rs6000_output_savres_externs (FILE *file)
30567 rs6000_stack_t *info = rs6000_stack_info ();
30569 if (TARGET_DEBUG_STACK)
30570 debug_stack_info (info);
30572 /* Write .extern for any function we will call to save and restore
30573 fp values. */
30574 if (info->first_fp_reg_save < 64
30575 && !TARGET_MACHO
30576 && !TARGET_ELF)
30578 char *name;
30579 int regno = info->first_fp_reg_save - 32;
30581 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
30583 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
30584 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
30585 name = rs6000_savres_routine_name (info, regno, sel);
30586 fprintf (file, "\t.extern %s\n", name);
30588 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
30590 bool lr = (info->savres_strategy
30591 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
30592 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
30593 name = rs6000_savres_routine_name (info, regno, sel);
30594 fprintf (file, "\t.extern %s\n", name);
30599 /* Write function prologue. */
30601 static void
30602 rs6000_output_function_prologue (FILE *file)
30604 if (!cfun->is_thunk)
30605 rs6000_output_savres_externs (file);
30607 /* ELFv2 ABI r2 setup code and local entry point. This must follow
30608 immediately after the global entry point label. */
30609 if (rs6000_global_entry_point_needed_p ())
30611 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
30613 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
30615 if (TARGET_CMODEL != CMODEL_LARGE)
30617 /* In the small and medium code models, we assume the TOC is less than
30618 2 GB away from the text section, so it can be computed via the
30619 following two-instruction sequence. */
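          /* That is, with the internal label spelled ".LCF0" for
             illustration:
                0:      addis 2,12,.TOC.-.LCF0@ha
                        addi 2,2,.TOC.-.LCF0@l  */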
30620 char buf[256];
30622 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
30623 fprintf (file, "0:\taddis 2,12,.TOC.-");
30624 assemble_name (file, buf);
30625 fprintf (file, "@ha\n");
30626 fprintf (file, "\taddi 2,2,.TOC.-");
30627 assemble_name (file, buf);
30628 fprintf (file, "@l\n");
30630 else
30632 /* In the large code model, we allow arbitrary offsets between the
30633 TOC and the text section, so we have to load the offset from
30634 memory. The data field is emitted directly before the global
30635 entry point in rs6000_elf_declare_function_name. */
30636 char buf[256];
30638 #ifdef HAVE_AS_ENTRY_MARKERS
30639 /* If supported by the linker, emit a marker relocation. If the
30640 total code size of the final executable or shared library
30641 happens to fit into 2 GB after all, the linker will replace
30642 this code sequence with the sequence for the small or medium
30643 code model. */
30644 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
30645 #endif
30646 fprintf (file, "\tld 2,");
30647 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
30648 assemble_name (file, buf);
30649 fprintf (file, "-");
30650 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
30651 assemble_name (file, buf);
30652 fprintf (file, "(12)\n");
30653 fprintf (file, "\tadd 2,2,12\n");
30656 fputs ("\t.localentry\t", file);
30657 assemble_name (file, name);
30658 fputs (",.-", file);
30659 assemble_name (file, name);
30660 fputs ("\n", file);
30663 /* Output -mprofile-kernel code. This needs to be done here instead of
30664 in output_function_profile since it must go after the ELFv2 ABI
30665 local entry point. */
30666 if (TARGET_PROFILE_KERNEL && crtl->profile)
30668 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
30669 gcc_assert (!TARGET_32BIT);
30671 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
30673 /* In the ELFv2 ABI we have no compiler stack word. It must be
30674 the responsibility of _mcount to preserve the static chain
30675 register if required. */
30676 if (DEFAULT_ABI != ABI_ELFv2
30677 && cfun->static_chain_decl != NULL)
30679 asm_fprintf (file, "\tstd %s,24(%s)\n",
30680 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
30681 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
30682 asm_fprintf (file, "\tld %s,24(%s)\n",
30683 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
30685 else
30686 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
30689 rs6000_pic_labelno++;
30692 /* -mprofile-kernel code calls mcount before the function prologue,
30693 so a profiled leaf function should stay a leaf function. */
30694 static bool
30695 rs6000_keep_leaf_when_profiled ()
30697 return TARGET_PROFILE_KERNEL;
30700 /* Non-zero if vmx regs are restored before the frame pop, zero if
30701 we restore after the pop when possible. */
30702 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
30704 /* Restoring cr is a two step process: loading a reg from the frame
30705 save, then moving the reg to cr. For ABI_V4 we must let the
30706 unwinder know that the stack location is no longer valid at or
30707 before the stack deallocation, but we can't emit a cfa_restore for
30708 cr at the stack deallocation like we do for other registers.
30709 The trouble is that it is possible for the move to cr to be
30710 scheduled after the stack deallocation. So say exactly where cr
30711 is located on each of the two insns. */
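/* A minimal sketch of the two insns (register number and frame
   offset illustrative):

       lwz   12,8(1)		; load_cr_save: saved CR word -> gpr
       mtcrf 0x20,12		; restore_saved_cr: gpr -> CR field(s)

   The REG_CFA_REGISTER / REG_CFA_RESTORE notes added below tell the
   unwinder where cr lives at each of the two points.  */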
30713 static rtx
30714 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
30716 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
30717 rtx reg = gen_rtx_REG (SImode, regno);
30718 rtx_insn *insn = emit_move_insn (reg, mem);
30720 if (!exit_func && DEFAULT_ABI == ABI_V4)
30722 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
30723 rtx set = gen_rtx_SET (reg, cr);
30725 add_reg_note (insn, REG_CFA_REGISTER, set);
30726 RTX_FRAME_RELATED_P (insn) = 1;
30728 return reg;
30731 /* Reload CR from REG. */
30733 static void
30734 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
30736 int count = 0;
30737 int i;
30739 if (using_mfcr_multiple)
30741 for (i = 0; i < 8; i++)
30742 if (save_reg_p (CR0_REGNO + i))
30743 count++;
30744 gcc_assert (count);
30747 if (using_mfcr_multiple && count > 1)
30749 rtx_insn *insn;
30750 rtvec p;
30751 int ndx;
30753 p = rtvec_alloc (count);
30755 ndx = 0;
30756 for (i = 0; i < 8; i++)
30757 if (save_reg_p (CR0_REGNO + i))
30759 rtvec r = rtvec_alloc (2);
30760 RTVEC_ELT (r, 0) = reg;
30761 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
30762 RTVEC_ELT (p, ndx) =
30763 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
30764 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
30765 ndx++;
30767 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30768 gcc_assert (ndx == count);
30770 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
30771 CR field separately. */
30772 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
30774 for (i = 0; i < 8; i++)
30775 if (save_reg_p (CR0_REGNO + i))
30776 add_reg_note (insn, REG_CFA_RESTORE,
30777 gen_rtx_REG (SImode, CR0_REGNO + i));
30779 RTX_FRAME_RELATED_P (insn) = 1;
30782 else
30783 for (i = 0; i < 8; i++)
30784 if (save_reg_p (CR0_REGNO + i))
30786 rtx insn = emit_insn (gen_movsi_to_cr_one
30787 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
30789 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
30790 CR field separately, attached to the insn that in fact
30791 restores this particular CR field. */
30792 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
30794 add_reg_note (insn, REG_CFA_RESTORE,
30795 gen_rtx_REG (SImode, CR0_REGNO + i));
30797 RTX_FRAME_RELATED_P (insn) = 1;
30801 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
30802 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
30803 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
30805 rtx_insn *insn = get_last_insn ();
30806 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
30808 add_reg_note (insn, REG_CFA_RESTORE, cr);
30809 RTX_FRAME_RELATED_P (insn) = 1;
30813 /* Like cr, the move to lr instruction can be scheduled after the
30814 stack deallocation, but unlike cr, its stack frame save is still
30815 valid. So we only need to emit the cfa_restore on the correct
30816 instruction. */
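/* Illustrative 64-bit sequence (the slot offset comes from
   info->lr_save_offset, typically 16(r1) for the AIX/ELFv2 ABIs):

       ld   0,16(1)		; load_lr_save
       mtlr 0			; restore_saved_lr, carries the
				; REG_CFA_RESTORE note
*/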
30818 static void
30819 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
30821 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
30822 rtx reg = gen_rtx_REG (Pmode, regno);
30824 emit_move_insn (reg, mem);
30827 static void
30828 restore_saved_lr (int regno, bool exit_func)
30830 rtx reg = gen_rtx_REG (Pmode, regno);
30831 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
30832 rtx_insn *insn = emit_move_insn (lr, reg);
30834 if (!exit_func && flag_shrink_wrap)
30836 add_reg_note (insn, REG_CFA_RESTORE, lr);
30837 RTX_FRAME_RELATED_P (insn) = 1;
30841 static rtx
30842 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
30844 if (DEFAULT_ABI == ABI_ELFv2)
30846 int i;
30847 for (i = 0; i < 8; i++)
30848 if (save_reg_p (CR0_REGNO + i))
30850 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
30851 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
30852 cfa_restores);
30855 else if (info->cr_save_p)
30856 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30857 gen_rtx_REG (SImode, CR2_REGNO),
30858 cfa_restores);
30860 if (info->lr_save_p)
30861 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30862 gen_rtx_REG (Pmode, LR_REGNO),
30863 cfa_restores);
30864 return cfa_restores;
30867 /* Return true if OFFSET from stack pointer can be clobbered by signals.
30868 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
30869 bytes below the stack pointer that are not clobbered by signals. */
30871 static inline bool
30872 offset_below_red_zone_p (HOST_WIDE_INT offset)
30874 return offset < (DEFAULT_ABI == ABI_V4
30875 ? 0
30876 : TARGET_32BIT ? -220 : -288);
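/* For instance, on 64-bit AIX/ELFv2 an offset of -288 still lies in
   the protected area (returns false), while -289 does not (returns
   true); under the V.4 ABI every offset below the stack pointer is
   considered clobberable.  */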
30879 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
30881 static void
30882 emit_cfa_restores (rtx cfa_restores)
30884 rtx_insn *insn = get_last_insn ();
30885 rtx *loc = &REG_NOTES (insn);
30887 while (*loc)
30888 loc = &XEXP (*loc, 1);
30889 *loc = cfa_restores;
30890 RTX_FRAME_RELATED_P (insn) = 1;
30893 /* Emit function epilogue as insns. */
30895 void
30896 rs6000_emit_epilogue (int sibcall)
30898 rs6000_stack_t *info;
30899 int restoring_GPRs_inline;
30900 int restoring_FPRs_inline;
30901 int using_load_multiple;
30902 int using_mtcr_multiple;
30903 int use_backchain_to_restore_sp;
30904 int restore_lr;
30905 int strategy;
30906 HOST_WIDE_INT frame_off = 0;
30907 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
30908 rtx frame_reg_rtx = sp_reg_rtx;
30909 rtx cfa_restores = NULL_RTX;
30910 rtx insn;
30911 rtx cr_save_reg = NULL_RTX;
30912 machine_mode reg_mode = Pmode;
30913 int reg_size = TARGET_32BIT ? 4 : 8;
30914 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
30915 ? DFmode : SFmode;
30916 int fp_reg_size = 8;
30917 int i;
30918 bool exit_func;
30919 unsigned ptr_regno;
30921 info = rs6000_stack_info ();
30923 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
30925 reg_mode = V2SImode;
30926 reg_size = 8;
30929 strategy = info->savres_strategy;
30930 using_load_multiple = strategy & REST_MULTIPLE;
30931 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
30932 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
30933 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
30934 || rs6000_cpu == PROCESSOR_PPC603
30935 || rs6000_cpu == PROCESSOR_PPC750
30936 || optimize_size);
30937 /* Restore via the backchain when we have a large frame, since this
30938 is more efficient than an addis, addi pair. The second condition
30939 here will not trigger at the moment; we don't actually need a
30940 frame pointer for alloca, but the generic parts of the compiler
30941 give us one anyway. */
30942 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
30943 ? info->lr_save_offset
30944 : 0) > 32767
30945 || (cfun->calls_alloca
30946 && !frame_pointer_needed));
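/* E.g. for a 40000-byte frame the offset will not fit in a 16-bit
   displacement (40000 > 32767), so we reload the old stack pointer
   from the back chain word kept at 0(r1) rather than rebuilding the
   offset with an addis/addi pair.  */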
30947 restore_lr = (info->lr_save_p
30948 && (restoring_FPRs_inline
30949 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
30950 && (restoring_GPRs_inline
30951 || info->first_fp_reg_save < 64)
30952 && !cfun->machine->lr_is_wrapped_separately);
30955 if (WORLD_SAVE_P (info))
30957 int i, j;
30958 char rname[30];
30959 const char *alloc_rname;
30960 rtvec p;
30962 /* eh_rest_world_r10 will return to the location saved in the LR
30963 stack slot (which is not likely to be our caller).
30964 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
30965 rest_world is similar, except any R10 parameter is ignored.
30966 The exception-handling stuff that was here in 2.95 is no
30967 longer necessary. */
30969 p = rtvec_alloc (9
30970 + 32 - info->first_gp_reg_save
30971 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
30972 + 63 + 1 - info->first_fp_reg_save);
30974 strcpy (rname, ((crtl->calls_eh_return) ?
30975 "*eh_rest_world_r10" : "*rest_world"));
30976 alloc_rname = ggc_strdup (rname);
30978 j = 0;
30979 RTVEC_ELT (p, j++) = ret_rtx;
30980 RTVEC_ELT (p, j++)
30981 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
30982 /* The instruction pattern requires a clobber here;
30983 it is shared with the restVEC helper. */
30984 RTVEC_ELT (p, j++)
30985 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
30988 /* CR register traditionally saved as CR2. */
30989 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
30990 RTVEC_ELT (p, j++)
30991 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
30992 if (flag_shrink_wrap)
30994 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30995 gen_rtx_REG (Pmode, LR_REGNO),
30996 cfa_restores);
30997 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31001 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31003 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
31004 RTVEC_ELT (p, j++)
31005 = gen_frame_load (reg,
31006 frame_reg_rtx, info->gp_save_offset + reg_size * i);
31007 if (flag_shrink_wrap)
31008 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31010 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
31012 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
31013 RTVEC_ELT (p, j++)
31014 = gen_frame_load (reg,
31015 frame_reg_rtx, info->altivec_save_offset + 16 * i);
31016 if (flag_shrink_wrap)
31017 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31019 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
31021 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
31022 ? DFmode : SFmode),
31023 info->first_fp_reg_save + i);
31024 RTVEC_ELT (p, j++)
31025 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
31026 if (flag_shrink_wrap)
31027 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31029 RTVEC_ELT (p, j++)
31030 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
31031 RTVEC_ELT (p, j++)
31032 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
31033 RTVEC_ELT (p, j++)
31034 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
31035 RTVEC_ELT (p, j++)
31036 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
31037 RTVEC_ELT (p, j++)
31038 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
31039 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
31041 if (flag_shrink_wrap)
31043 REG_NOTES (insn) = cfa_restores;
31044 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31045 RTX_FRAME_RELATED_P (insn) = 1;
31047 return;
31050 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
31051 if (info->push_p)
31052 frame_off = info->total_size;
31054 /* Restore AltiVec registers if we must do so before adjusting the
31055 stack. */
31056 if (info->altivec_size != 0
31057 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31058 || (DEFAULT_ABI != ABI_V4
31059 && offset_below_red_zone_p (info->altivec_save_offset))))
31061 int i;
31062 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
31064 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
31065 if (use_backchain_to_restore_sp)
31067 int frame_regno = 11;
31069 if ((strategy & REST_INLINE_VRS) == 0)
31071 /* Of r11 and r12, select the one not clobbered by an
31072 out-of-line restore function for the frame register. */
31073 frame_regno = 11 + 12 - scratch_regno;
31075 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
31076 emit_move_insn (frame_reg_rtx,
31077 gen_rtx_MEM (Pmode, sp_reg_rtx));
31078 frame_off = 0;
31080 else if (frame_pointer_needed)
31081 frame_reg_rtx = hard_frame_pointer_rtx;
31083 if ((strategy & REST_INLINE_VRS) == 0)
31085 int end_save = info->altivec_save_offset + info->altivec_size;
31086 int ptr_off;
31087 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
31088 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
31090 if (end_save + frame_off != 0)
31092 rtx offset = GEN_INT (end_save + frame_off);
31094 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
31096 else
31097 emit_move_insn (ptr_reg, frame_reg_rtx);
31099 ptr_off = -end_save;
31100 insn = rs6000_emit_savres_rtx (info, scratch_reg,
31101 info->altivec_save_offset + ptr_off,
31102 0, V4SImode, SAVRES_VR);
31104 else
31106 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31107 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
31109 rtx addr, areg, mem, insn;
31110 rtx reg = gen_rtx_REG (V4SImode, i);
31111 HOST_WIDE_INT offset
31112 = (info->altivec_save_offset + frame_off
31113 + 16 * (i - info->first_altivec_reg_save));
31115 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
31117 mem = gen_frame_mem (V4SImode,
31118 gen_rtx_PLUS (Pmode, frame_reg_rtx,
31119 GEN_INT (offset)));
31120 insn = gen_rtx_SET (reg, mem);
31122 else
31124 areg = gen_rtx_REG (Pmode, 0);
31125 emit_move_insn (areg, GEN_INT (offset));
31127 /* AltiVec addressing mode is [reg+reg]. */
31128 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
31129 mem = gen_frame_mem (V4SImode, addr);
31131 /* Rather than emitting a generic move, force use of the
31132 lvx instruction, which we always want. In particular we
31133 don't want lxvd2x/xxpermdi for little endian. */
31134 insn = gen_altivec_lvx_v4si_internal (reg, mem);
31137 (void) emit_insn (insn);
31141 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31142 if (((strategy & REST_INLINE_VRS) == 0
31143 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
31144 && (flag_shrink_wrap
31145 || (offset_below_red_zone_p
31146 (info->altivec_save_offset
31147 + 16 * (i - info->first_altivec_reg_save)))))
31149 rtx reg = gen_rtx_REG (V4SImode, i);
31150 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31154 /* Restore VRSAVE if we must do so before adjusting the stack. */
31155 if (info->vrsave_size != 0
31156 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31157 || (DEFAULT_ABI != ABI_V4
31158 && offset_below_red_zone_p (info->vrsave_save_offset))))
31160 rtx reg;
31162 if (frame_reg_rtx == sp_reg_rtx)
31164 if (use_backchain_to_restore_sp)
31166 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31167 emit_move_insn (frame_reg_rtx,
31168 gen_rtx_MEM (Pmode, sp_reg_rtx));
31169 frame_off = 0;
31171 else if (frame_pointer_needed)
31172 frame_reg_rtx = hard_frame_pointer_rtx;
31175 reg = gen_rtx_REG (SImode, 12);
31176 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31177 info->vrsave_save_offset + frame_off));
31179 emit_insn (generate_set_vrsave (reg, info, 1));
31182 insn = NULL_RTX;
31183 /* If we have a large stack frame, restore the old stack pointer
31184 using the backchain. */
31185 if (use_backchain_to_restore_sp)
31187 if (frame_reg_rtx == sp_reg_rtx)
31189 /* Under V.4, don't reset the stack pointer until after we're done
31190 loading the saved registers. */
31191 if (DEFAULT_ABI == ABI_V4)
31192 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31194 insn = emit_move_insn (frame_reg_rtx,
31195 gen_rtx_MEM (Pmode, sp_reg_rtx));
31196 frame_off = 0;
31198 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31199 && DEFAULT_ABI == ABI_V4)
31200 /* frame_reg_rtx has been set up by the altivec restore. */
31202 else
31204 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
31205 frame_reg_rtx = sp_reg_rtx;
31208 /* If we have a frame pointer, we can restore the old stack pointer
31209 from it. */
31210 else if (frame_pointer_needed)
31212 frame_reg_rtx = sp_reg_rtx;
31213 if (DEFAULT_ABI == ABI_V4)
31214 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31215 /* Prevent reordering memory accesses against stack pointer restore. */
31216 else if (cfun->calls_alloca
31217 || offset_below_red_zone_p (-info->total_size))
31218 rs6000_emit_stack_tie (frame_reg_rtx, true);
31220 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
31221 GEN_INT (info->total_size)));
31222 frame_off = 0;
31224 else if (info->push_p
31225 && DEFAULT_ABI != ABI_V4
31226 && !crtl->calls_eh_return)
31228 /* Prevent reordering memory accesses against stack pointer restore. */
31229 if (cfun->calls_alloca
31230 || offset_below_red_zone_p (-info->total_size))
31231 rs6000_emit_stack_tie (frame_reg_rtx, false);
31232 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
31233 GEN_INT (info->total_size)));
31234 frame_off = 0;
31236 if (insn && frame_reg_rtx == sp_reg_rtx)
31238 if (cfa_restores)
31240 REG_NOTES (insn) = cfa_restores;
31241 cfa_restores = NULL_RTX;
31243 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31244 RTX_FRAME_RELATED_P (insn) = 1;
31247 /* Restore AltiVec registers if we have not done so already. */
31248 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31249 && info->altivec_size != 0
31250 && (DEFAULT_ABI == ABI_V4
31251 || !offset_below_red_zone_p (info->altivec_save_offset)))
31253 int i;
31255 if ((strategy & REST_INLINE_VRS) == 0)
31257 int end_save = info->altivec_save_offset + info->altivec_size;
31258 int ptr_off;
31259 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
31260 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
31261 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
31263 if (end_save + frame_off != 0)
31265 rtx offset = GEN_INT (end_save + frame_off);
31267 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
31269 else
31270 emit_move_insn (ptr_reg, frame_reg_rtx);
31272 ptr_off = -end_save;
31273 insn = rs6000_emit_savres_rtx (info, scratch_reg,
31274 info->altivec_save_offset + ptr_off,
31275 0, V4SImode, SAVRES_VR);
31276 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
31278 /* Frame reg was clobbered by out-of-line save. Restore it
31279 from ptr_reg, and if we are calling out-of-line gpr or
31280 fpr restore set up the correct pointer and offset. */
31281 unsigned newptr_regno = 1;
31282 if (!restoring_GPRs_inline)
31284 bool lr = info->gp_save_offset + info->gp_size == 0;
31285 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
31286 newptr_regno = ptr_regno_for_savres (sel);
31287 end_save = info->gp_save_offset + info->gp_size;
31289 else if (!restoring_FPRs_inline)
31291 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
31292 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
31293 newptr_regno = ptr_regno_for_savres (sel);
31294 end_save = info->fp_save_offset + info->fp_size;
31297 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
31298 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
31300 if (end_save + ptr_off != 0)
31302 rtx offset = GEN_INT (end_save + ptr_off);
31304 frame_off = -end_save;
31305 if (TARGET_32BIT)
31306 emit_insn (gen_addsi3_carry (frame_reg_rtx,
31307 ptr_reg, offset));
31308 else
31309 emit_insn (gen_adddi3_carry (frame_reg_rtx,
31310 ptr_reg, offset));
31312 else
31314 frame_off = ptr_off;
31315 emit_move_insn (frame_reg_rtx, ptr_reg);
31319 else
31321 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31322 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
31324 rtx addr, areg, mem, insn;
31325 rtx reg = gen_rtx_REG (V4SImode, i);
31326 HOST_WIDE_INT offset
31327 = (info->altivec_save_offset + frame_off
31328 + 16 * (i - info->first_altivec_reg_save));
31330 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
31332 mem = gen_frame_mem (V4SImode,
31333 gen_rtx_PLUS (Pmode, frame_reg_rtx,
31334 GEN_INT (offset)));
31335 insn = gen_rtx_SET (reg, mem);
31337 else
31339 areg = gen_rtx_REG (Pmode, 0);
31340 emit_move_insn (areg, GEN_INT (offset));
31342 /* AltiVec addressing mode is [reg+reg]. */
31343 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
31344 mem = gen_frame_mem (V4SImode, addr);
31346 /* Rather than emitting a generic move, force use of the
31347 lvx instruction, which we always want. In particular we
31348 don't want lxvd2x/xxpermdi for little endian. */
31349 insn = gen_altivec_lvx_v4si_internal (reg, mem);
31352 (void) emit_insn (insn);
31356 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
31357 if (((strategy & REST_INLINE_VRS) == 0
31358 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
31359 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
31361 rtx reg = gen_rtx_REG (V4SImode, i);
31362 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31366 /* Restore VRSAVE if we have not done so already. */
31367 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
31368 && info->vrsave_size != 0
31369 && (DEFAULT_ABI == ABI_V4
31370 || !offset_below_red_zone_p (info->vrsave_save_offset)))
31372 rtx reg;
31374 reg = gen_rtx_REG (SImode, 12);
31375 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31376 info->vrsave_save_offset + frame_off));
31378 emit_insn (generate_set_vrsave (reg, info, 1));
31381 /* If we exit by an out-of-line restore function on ABI_V4 then that
31382 function will deallocate the stack, so we don't need to worry
31383 about the unwinder restoring cr from an invalid stack frame
31384 location. */
31385 exit_func = (!restoring_FPRs_inline
31386 || (!restoring_GPRs_inline
31387 && info->first_fp_reg_save == 64));
31389 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
31390 *separate* slots if the routine calls __builtin_eh_return, so
31391 that they can be independently restored by the unwinder. */
31392 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
31394 int i, cr_off = info->ehcr_offset;
31396 for (i = 0; i < 8; i++)
31397 if (!call_used_regs[CR0_REGNO + i])
31399 rtx reg = gen_rtx_REG (SImode, 0);
31400 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31401 cr_off + frame_off));
31403 insn = emit_insn (gen_movsi_to_cr_one
31404 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
31406 if (!exit_func && flag_shrink_wrap)
31408 add_reg_note (insn, REG_CFA_RESTORE,
31409 gen_rtx_REG (SImode, CR0_REGNO + i));
31411 RTX_FRAME_RELATED_P (insn) = 1;
31414 cr_off += reg_size;
31418 /* Get the old lr if we saved it. If we are restoring registers
31419 out-of-line, then the out-of-line routines can do this for us. */
31420 if (restore_lr && restoring_GPRs_inline)
31421 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
31423 /* Get the old cr if we saved it. */
31424 if (info->cr_save_p)
31426 unsigned cr_save_regno = 12;
31428 if (!restoring_GPRs_inline)
31430 /* Ensure we don't use the register used by the out-of-line
31431 gpr register restore below. */
31432 bool lr = info->gp_save_offset + info->gp_size == 0;
31433 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
31434 int gpr_ptr_regno = ptr_regno_for_savres (sel);
31436 if (gpr_ptr_regno == 12)
31437 cr_save_regno = 11;
31438 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
31440 else if (REGNO (frame_reg_rtx) == 12)
31441 cr_save_regno = 11;
31443 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
31444 info->cr_save_offset + frame_off,
31445 exit_func);
31448 /* Set LR here to try to overlap restores below. */
31449 if (restore_lr && restoring_GPRs_inline)
31450 restore_saved_lr (0, exit_func);
31452 /* Load exception handler data registers, if needed. */
31453 if (crtl->calls_eh_return)
31455 unsigned int i, regno;
31457 if (TARGET_AIX)
31459 rtx reg = gen_rtx_REG (reg_mode, 2);
31460 emit_insn (gen_frame_load (reg, frame_reg_rtx,
31461 frame_off + RS6000_TOC_SAVE_SLOT));
31464 for (i = 0; ; ++i)
31466 rtx mem;
31468 regno = EH_RETURN_DATA_REGNO (i);
31469 if (regno == INVALID_REGNUM)
31470 break;
31472 /* Note: possible use of r0 here to address SPE regs. */
31473 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
31474 info->ehrd_offset + frame_off
31475 + reg_size * (int) i);
31477 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
31481 /* Restore GPRs. This is done as a PARALLEL if we are using
31482 the load-multiple instructions. */
31483 if (TARGET_SPE_ABI
31484 && info->spe_64bit_regs_used
31485 && info->first_gp_reg_save != 32)
31487 /* Determine whether we can address all of the registers that need
31488 to be saved with an offset from frame_reg_rtx that fits in
31489 the small const field for SPE memory instructions. */
31490 int spe_regs_addressable
31491 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
31492 + reg_size * (32 - info->first_gp_reg_save - 1))
31493 && restoring_GPRs_inline);
31495 if (!spe_regs_addressable)
31497 int ool_adjust = 0;
31498 rtx old_frame_reg_rtx = frame_reg_rtx;
31499 /* Make r11 point to the start of the SPE save area. We worried about
31500 not clobbering it when we were saving registers in the prologue.
31501 There's no need to worry here because the static chain is passed
31502 anew to every function. */
31504 if (!restoring_GPRs_inline)
31505 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
31506 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
31507 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
31508 GEN_INT (info->spe_gp_save_offset
31509 + frame_off
31510 - ool_adjust)));
31511 /* Keep the invariant that frame_reg_rtx + frame_off points
31512 at the top of the stack frame. */
31513 frame_off = -info->spe_gp_save_offset + ool_adjust;
31516 if (restoring_GPRs_inline)
31518 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
31520 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31521 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
31523 rtx offset, addr, mem, reg;
31525 /* We're doing all this to ensure that the immediate offset
31526 fits into the immediate field of 'evldd'. */
31527 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
31529 offset = GEN_INT (spe_offset + reg_size * i);
31530 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
31531 mem = gen_rtx_MEM (V2SImode, addr);
31532 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
31534 emit_move_insn (reg, mem);
31537 else
31538 rs6000_emit_savres_rtx (info, frame_reg_rtx,
31539 info->spe_gp_save_offset + frame_off,
31540 info->lr_save_offset + frame_off,
31541 reg_mode,
31542 SAVRES_GPR | SAVRES_LR);
31544 else if (!restoring_GPRs_inline)
31546 /* We are jumping to an out-of-line function. */
31547 rtx ptr_reg;
31548 int end_save = info->gp_save_offset + info->gp_size;
31549 bool can_use_exit = end_save == 0;
31550 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
31551 int ptr_off;
31553 /* Emit stack reset code if we need it. */
31554 ptr_regno = ptr_regno_for_savres (sel);
31555 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
31556 if (can_use_exit)
31557 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
31558 else if (end_save + frame_off != 0)
31559 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
31560 GEN_INT (end_save + frame_off)));
31561 else if (REGNO (frame_reg_rtx) != ptr_regno)
31562 emit_move_insn (ptr_reg, frame_reg_rtx);
31563 if (REGNO (frame_reg_rtx) == ptr_regno)
31564 frame_off = -end_save;
31566 if (can_use_exit && info->cr_save_p)
31567 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
31569 ptr_off = -end_save;
31570 rs6000_emit_savres_rtx (info, ptr_reg,
31571 info->gp_save_offset + ptr_off,
31572 info->lr_save_offset + ptr_off,
31573 reg_mode, sel);
31575 else if (using_load_multiple)
31577 rtvec p;
31578 p = rtvec_alloc (32 - info->first_gp_reg_save);
31579 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
31580 RTVEC_ELT (p, i)
31581 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
31582 frame_reg_rtx,
31583 info->gp_save_offset + frame_off + reg_size * i);
31584 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
31586 else
31588 int offset = info->gp_save_offset + frame_off;
31589 for (i = info->first_gp_reg_save; i < 32; i++)
31591 if (rs6000_reg_live_or_pic_offset_p (i)
31592 && !cfun->machine->gpr_is_wrapped_separately[i])
31594 rtx reg = gen_rtx_REG (reg_mode, i);
31595 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
31598 offset += reg_size;
31602 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
31604 /* If the frame pointer was used then we can't delay emitting
31605 a REG_CFA_DEF_CFA note. This must happen on the insn that
31606 restores the frame pointer, r31. We may have already emitted
31607 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
31608 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
31609 be harmless if emitted. */
31610 if (frame_pointer_needed)
31612 insn = get_last_insn ();
31613 add_reg_note (insn, REG_CFA_DEF_CFA,
31614 plus_constant (Pmode, frame_reg_rtx, frame_off));
31615 RTX_FRAME_RELATED_P (insn) = 1;
31618 /* Set up cfa_restores. We always need these when
31619 shrink-wrapping. If not shrink-wrapping then we only need
31620 the cfa_restore when the stack location is no longer valid.
31621 The cfa_restores must be emitted on or before the insn that
31622 invalidates the stack, and of course must not be emitted
31623 before the insn that actually does the restore. The latter
31624 is why it is a bad idea to emit the cfa_restores as a group
31625 on the last instruction here that actually does a restore:
31626 That insn may be reordered with respect to others doing
31627 restores. */
31628 if (flag_shrink_wrap
31629 && !restoring_GPRs_inline
31630 && info->first_fp_reg_save == 64)
31631 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
31633 for (i = info->first_gp_reg_save; i < 32; i++)
31634 if (!restoring_GPRs_inline
31635 || using_load_multiple
31636 || rs6000_reg_live_or_pic_offset_p (i))
31638 if (cfun->machine->gpr_is_wrapped_separately[i])
31639 continue;
31641 rtx reg = gen_rtx_REG (reg_mode, i);
31642 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31646 if (!restoring_GPRs_inline
31647 && info->first_fp_reg_save == 64)
31649 /* We are jumping to an out-of-line function. */
31650 if (cfa_restores)
31651 emit_cfa_restores (cfa_restores);
31652 return;
31655 if (restore_lr && !restoring_GPRs_inline)
31657 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
31658 restore_saved_lr (0, exit_func);
31661 /* Restore FPRs if we need to do it without calling a function. */
31662 if (restoring_FPRs_inline)
31664 int offset = info->fp_save_offset + frame_off;
31665 for (i = info->first_fp_reg_save; i < 64; i++)
31667 if (save_reg_p (i)
31668 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
31670 rtx reg = gen_rtx_REG (fp_reg_mode, i);
31671 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
31672 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
31673 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
31674 cfa_restores);
31677 offset += fp_reg_size;
31681 /* If we saved cr, restore it here. Just those that were used. */
31682 if (info->cr_save_p)
31683 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
31685 /* If this is V.4, unwind the stack pointer after all of the loads
31686 have been done, or set up r11 if we are restoring fp out of line. */
31687 ptr_regno = 1;
31688 if (!restoring_FPRs_inline)
31690 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
31691 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
31692 ptr_regno = ptr_regno_for_savres (sel);
31695 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
31696 if (REGNO (frame_reg_rtx) == ptr_regno)
31697 frame_off = 0;
31699 if (insn && restoring_FPRs_inline)
31701 if (cfa_restores)
31703 REG_NOTES (insn) = cfa_restores;
31704 cfa_restores = NULL_RTX;
31706 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
31707 RTX_FRAME_RELATED_P (insn) = 1;
31710 if (crtl->calls_eh_return)
31712 rtx sa = EH_RETURN_STACKADJ_RTX;
31713 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
31716 if (!sibcall && restoring_FPRs_inline)
31718 if (cfa_restores)
31720 /* We can't hang the cfa_restores off a simple return,
31721 since the shrink-wrap code sometimes uses an existing
31722 return. This means there might be a path from
31723 pre-prologue code to this return, and dwarf2cfi code
31724 wants the eh_frame unwinder state to be the same on
31725 all paths to any point. So we need to emit the
31726 cfa_restores before the return. For -m64 we really
31727 don't need epilogue cfa_restores at all, except for
31728 this irritating dwarf2cfi with shrink-wrap
31729 requirement; the stack red-zone means eh_frame info
31730 from the prologue telling the unwinder to restore
31731 from the stack is perfectly good right to the end of
31732 the function. */
31733 emit_insn (gen_blockage ());
31734 emit_cfa_restores (cfa_restores);
31735 cfa_restores = NULL_RTX;
31738 emit_jump_insn (targetm.gen_simple_return ());
31741 if (!sibcall && !restoring_FPRs_inline)
31743 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
31744 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
31745 int elt = 0;
31746 RTVEC_ELT (p, elt++) = ret_rtx;
31747 if (lr)
31748 RTVEC_ELT (p, elt++)
31749 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
31751 /* We have to restore more than two FP registers, so branch to the
31752 restore function. It will return to our caller. */
31753 int i;
31754 int reg;
31755 rtx sym;
31757 if (flag_shrink_wrap)
31758 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
31760 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
31761 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
31762 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
31763 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
31765 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
31767 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
31769 RTVEC_ELT (p, elt++)
31770 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
31771 if (flag_shrink_wrap)
31772 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
31775 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
31778 if (cfa_restores)
31780 if (sibcall)
31781 /* Ensure the cfa_restores are hung off an insn that won't
31782 be reordered above other restores. */
31783 emit_insn (gen_blockage ());
31785 emit_cfa_restores (cfa_restores);
31789 /* Write function epilogue. */
31791 static void
31792 rs6000_output_function_epilogue (FILE *file)
31794 #if TARGET_MACHO
31795 macho_branch_islands ();
31798 rtx_insn *insn = get_last_insn ();
31799 rtx_insn *deleted_debug_label = NULL;
31801 /* Mach-O doesn't support labels at the end of objects, so if
31802 it looks like we might want one, take special action.
31804 First, collect any sequence of deleted debug labels. */
31805 while (insn
31806 && NOTE_P (insn)
31807 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
31809 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
31810 notes only, instead set their CODE_LABEL_NUMBER to -1,
31811 otherwise there would be code generation differences
31812 between -g and -g0. */
31813 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
31814 deleted_debug_label = insn;
31815 insn = PREV_INSN (insn);
31818 /* Second, if we have:
31819 label:
31820 barrier
31821 then this needs to be detected, so skip past the barrier. */
31823 if (insn && BARRIER_P (insn))
31824 insn = PREV_INSN (insn);
31826 /* Up to now we've only seen notes or barriers. */
31827 if (insn)
31829 if (LABEL_P (insn)
31830 || (NOTE_P (insn)
31831 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
31832 /* Trailing label: <barrier>. */
31833 fputs ("\tnop\n", file);
31834 else
31836 /* Lastly, see if we have a completely empty function body. */
31837 while (insn && ! INSN_P (insn))
31838 insn = PREV_INSN (insn);
31839 /* If we don't find any insns, we've got an empty function body,
31840 i.e. completely empty - without a return or branch. This is
31841 taken as the case where a function body has been removed
31842 because it contains an inline __builtin_unreachable(). GCC
31843 states that reaching __builtin_unreachable() means UB, so we're
31844 not obliged to do anything special; however, we want
31845 non-zero-sized function bodies. To meet this, and help the
31846 user out, let's trap the case. */
31847 if (insn == NULL)
31848 fputs ("\ttrap\n", file);
31851 else if (deleted_debug_label)
31852 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
31853 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
31854 CODE_LABEL_NUMBER (insn) = -1;
31856 #endif
31858 /* Output a traceback table here. See /usr/include/sys/debug.h for info
31859 on its format.
31861 We don't output a traceback table if -finhibit-size-directive was
31862 used. The documentation for -finhibit-size-directive reads
31863 ``don't output a @code{.size} assembler directive, or anything
31864 else that would cause trouble if the function is split in the
31865 middle, and the two halves are placed at locations far apart in
31866 memory.'' The traceback table has this property, since it
31867 includes the offset from the start of the function to the
31868 traceback table itself.
31870 System V.4 PowerPC (and the embedded ABI derived from it) uses a
31871 different traceback table. */
31872 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
31873 && ! flag_inhibit_size_directive
31874 && rs6000_traceback != traceback_none && !cfun->is_thunk)
31876 const char *fname = NULL;
31877 const char *language_string = lang_hooks.name;
31878 int fixed_parms = 0, float_parms = 0, parm_info = 0;
31879 int i;
31880 int optional_tbtab;
31881 rs6000_stack_t *info = rs6000_stack_info ();
31883 if (rs6000_traceback == traceback_full)
31884 optional_tbtab = 1;
31885 else if (rs6000_traceback == traceback_part)
31886 optional_tbtab = 0;
31887 else
31888 optional_tbtab = !optimize_size && !TARGET_ELF;
31890 if (optional_tbtab)
31892 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
31893 while (*fname == '.') /* V.4 encodes . in the name */
31894 fname++;
31896 /* Need label immediately before tbtab, so we can compute
31897 its offset from the function start. */
31898 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
31899 ASM_OUTPUT_LABEL (file, fname);
31902 /* The .tbtab pseudo-op can only be used for the first eight
31903 expressions, since it can't handle the possibly variable
31904 length fields that follow. However, if you omit the optional
31905 fields, the assembler outputs zeros for all optional fields
31906 anyway, giving each variable-length field its minimum length
31907 (as defined in sys/debug.h). Thus we cannot use the .tbtab
31908 pseudo-op at all. */
31910 /* An all-zero word flags the start of the tbtab, for debuggers
31911 that have to find it by searching forward from the entry
31912 point or from the current pc. */
31913 fputs ("\t.long 0\n", file);
31915 /* Tbtab format type. Use format type 0. */
31916 fputs ("\t.byte 0,", file);
31918 /* Language type. Unfortunately, there does not seem to be any
31919 official way to discover the language being compiled, so we
31920 use language_string.
31921 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
31922 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
31923 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
31924 either, so for now use 0. */
31925 if (lang_GNU_C ()
31926 || ! strcmp (language_string, "GNU GIMPLE")
31927 || ! strcmp (language_string, "GNU Go")
31928 || ! strcmp (language_string, "libgccjit"))
31929 i = 0;
31930 else if (! strcmp (language_string, "GNU F77")
31931 || lang_GNU_Fortran ())
31932 i = 1;
31933 else if (! strcmp (language_string, "GNU Pascal"))
31934 i = 2;
31935 else if (! strcmp (language_string, "GNU Ada"))
31936 i = 3;
31937 else if (lang_GNU_CXX ()
31938 || ! strcmp (language_string, "GNU Objective-C++"))
31939 i = 9;
31940 else if (! strcmp (language_string, "GNU Java"))
31941 i = 13;
31942 else if (! strcmp (language_string, "GNU Objective-C"))
31943 i = 14;
31944 else
31945 gcc_unreachable ();
31946 fprintf (file, "%d,", i);
31948 /* 8 single bit fields: global linkage (not set for C extern linkage,
31949 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
31950 from start of procedure stored in tbtab, internal function, function
31951 has controlled storage, function has no toc, function uses fp,
31952 function logs/aborts fp operations. */
31953 /* Assume that fp operations are used if any fp reg must be saved. */
31954 fprintf (file, "%d,",
31955 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
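/* For instance, with a full traceback table (optional_tbtab == 1)
   and at least one FP register saved, this byte comes out as
   (1 << 5) | (1 << 1) == 34.  */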
31957 /* 6 bitfields: function is interrupt handler, name present in
31958 proc table, function calls alloca, on condition directives
31959 (controls stack walks, 3 bits), saves condition reg, saves
31960 link reg. */
31961 /* The `function calls alloca' bit seems to be set whenever reg 31 is
31962 set up as a frame pointer, even when there is no alloca call. */
31963 fprintf (file, "%d,",
31964 ((optional_tbtab << 6)
31965 | ((optional_tbtab & frame_pointer_needed) << 5)
31966 | (info->cr_save_p << 1)
31967 | (info->lr_save_p)));
31969 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
31970 (6 bits). */
31971 fprintf (file, "%d,",
31972 (info->push_p << 7) | (64 - info->first_fp_reg_save));
31974 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
31975 fprintf (file, "%d,", (32 - first_reg_to_save ()));
31977 if (optional_tbtab)
31979 /* Compute the parameter info from the function decl argument
31980 list. */
31981 tree decl;
31982 int next_parm_info_bit = 31;
31984 for (decl = DECL_ARGUMENTS (current_function_decl);
31985 decl; decl = DECL_CHAIN (decl))
31987 rtx parameter = DECL_INCOMING_RTL (decl);
31988 machine_mode mode = GET_MODE (parameter);
31990 if (GET_CODE (parameter) == REG)
31992 if (SCALAR_FLOAT_MODE_P (mode))
31994 int bits;
31996 float_parms++;
31998 switch (mode)
32000 case SFmode:
32001 case SDmode:
32002 bits = 0x2;
32003 break;
32005 case DFmode:
32006 case DDmode:
32007 case TFmode:
32008 case TDmode:
32009 case IFmode:
32010 case KFmode:
32011 bits = 0x3;
32012 break;
32014 default:
32015 gcc_unreachable ();
32018 /* If only one bit will fit, don't or in this entry. */
32019 if (next_parm_info_bit > 0)
32020 parm_info |= (bits << (next_parm_info_bit - 1));
32021 next_parm_info_bit -= 2;
32023 else
32025 fixed_parms += ((GET_MODE_SIZE (mode)
32026 + (UNITS_PER_WORD - 1))
32027 / UNITS_PER_WORD);
32028 next_parm_info_bit -= 1;
32034 /* Number of fixed point parameters. */
32035 /* This is actually the number of words of fixed point parameters; thus
32036 an 8 byte struct counts as 2; and thus the maximum value is 8. */
32037 fprintf (file, "%d,", fixed_parms);
32039 /* 2 bitfields: number of floating point parameters (7 bits), parameters
32040 all on stack. */
32041 /* This is actually the number of fp registers that hold parameters;
32042 and thus the maximum value is 13. */
32043 /* Set parameters on stack bit if parameters are not in their original
32044 registers, regardless of whether they are on the stack? Xlc
32045 seems to set the bit when not optimizing. */
32046 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
32048 if (optional_tbtab)
32050 /* Optional fields follow. Some are variable length. */
32052 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
32053 float, 11 double float. */
32054 /* There is an entry for each parameter in a register, in the order
32055 that they occur in the parameter list. Any intervening arguments
32056 on the stack are ignored. If the list overflows a long (max
32057 possible length 34 bits) then completely leave off all elements
32058 that don't fit. */
32059 /* Only emit this long if there was at least one parameter. */
32060 if (fixed_parms || float_parms)
32061 fprintf (file, "\t.long %d\n", parm_info);
32063 /* Offset from start of code to tb table. */
32064 fputs ("\t.long ", file);
32065 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
32066 RS6000_OUTPUT_BASENAME (file, fname);
32067 putc ('-', file);
32068 rs6000_output_function_entry (file, fname);
32069 putc ('\n', file);
32071 /* Interrupt handler mask. */
32072 /* Omit this long, since we never set the interrupt handler bit
32073 above. */
32075 /* Number of CTL (controlled storage) anchors. */
32076 /* Omit this long, since the has_ctl bit is never set above. */
32078 /* Displacement into stack of each CTL anchor. */
32079 /* Omit this list of longs, because there are no CTL anchors. */
32081 /* Length of function name. */
32082 if (*fname == '*')
32083 ++fname;
32084 fprintf (file, "\t.short %d\n", (int) strlen (fname));
32086 /* Function name. */
32087 assemble_string (fname, strlen (fname));
32089 /* Register for alloca automatic storage; this is always reg 31.
32090 Only emit this if the alloca bit was set above. */
32091 if (frame_pointer_needed)
32092 fputs ("\t.byte 31\n", file);
32094 fputs ("\t.align 2\n", file);
32098 /* Arrange to define .LCTOC1 label, if not already done. */
32099 if (need_toc_init)
32101 need_toc_init = 0;
32102 if (!toc_initialized)
32104 switch_to_section (toc_section);
32105 switch_to_section (current_function_section ());
32110 /* -fsplit-stack support. */
32112 /* A SYMBOL_REF for __morestack. */
32113 static GTY(()) rtx morestack_ref;
32115 static rtx
32116 gen_add3_const (rtx rt, rtx ra, long c)
32118 if (TARGET_64BIT)
32119 return gen_adddi3 (rt, ra, GEN_INT (c));
32120 else
32121 return gen_addsi3 (rt, ra, GEN_INT (c));
32124 /* Emit -fsplit-stack prologue, which goes before the regular function
32125 prologue (at local entry point in the case of ELFv2). */
32127 void
32128 rs6000_expand_split_stack_prologue (void)
32130 rs6000_stack_t *info = rs6000_stack_info ();
32131 unsigned HOST_WIDE_INT allocate;
32132 long alloc_hi, alloc_lo;
32133 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
32134 rtx_insn *insn;
32136 gcc_assert (flag_split_stack && reload_completed);
32138 if (!info->push_p)
32139 return;
32141 if (global_regs[29])
32143 error ("-fsplit-stack uses register r29");
32144 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
32145 "conflicts with %qD", global_regs_decl[29]);
32148 allocate = info->total_size;
32149 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
32151 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
32152 return;
32154 if (morestack_ref == NULL_RTX)
32156 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
32157 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
32158 | SYMBOL_FLAG_FUNCTION);
32161 r0 = gen_rtx_REG (Pmode, 0);
32162 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32163 r12 = gen_rtx_REG (Pmode, 12);
32164 emit_insn (gen_load_split_stack_limit (r0));
32165 /* Always emit two insns here to calculate the requested stack,
32166 so that the linker can edit them when adjusting size for calling
32167 non-split-stack code. */
32168 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
32169 alloc_lo = -allocate - alloc_hi;
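/* Worked example (values illustrative): for allocate == 70000,
   -allocate == -70000; then alloc_hi = (-70000 + 0x8000) & ~0xffff
   == -65536 and alloc_lo = -70000 - (-65536) == -4464, so we emit
   the equivalent of "addis 12,1,-1" followed by "addi 12,12,-4464",
   both immediates fitting in 16 bits.  */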
32170 if (alloc_hi != 0)
32172 emit_insn (gen_add3_const (r12, r1, alloc_hi));
32173 if (alloc_lo != 0)
32174 emit_insn (gen_add3_const (r12, r12, alloc_lo));
32175 else
32176 emit_insn (gen_nop ());
32178 else
32180 emit_insn (gen_add3_const (r12, r1, alloc_lo));
32181 emit_insn (gen_nop ());
32184 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
32185 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
32186 ok_label = gen_label_rtx ();
32187 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
32188 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
32189 gen_rtx_LABEL_REF (VOIDmode, ok_label),
32190 pc_rtx);
32191 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
32192 JUMP_LABEL (insn) = ok_label;
32193 /* Mark the jump as very likely to be taken. */
32194 add_reg_br_prob_note (insn, profile_probability::very_likely ());
32196 lr = gen_rtx_REG (Pmode, LR_REGNO);
32197 insn = emit_move_insn (r0, lr);
32198 RTX_FRAME_RELATED_P (insn) = 1;
32199 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
32200 RTX_FRAME_RELATED_P (insn) = 1;
32202 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
32203 const0_rtx, const0_rtx));
32204 call_fusage = NULL_RTX;
32205 use_reg (&call_fusage, r12);
32206 /* Say the call uses r0, even though it doesn't, to stop regrename
32207 from twiddling with the insns saving lr, trashing args for cfun.
32208 The insns restoring lr are similarly protected by making
32209 split_stack_return use r0. */
32210 use_reg (&call_fusage, r0);
32211 add_function_usage_to (insn, call_fusage);
32212 /* Indicate that this function can't jump to non-local gotos. */
32213 make_reg_eh_region_note_nothrow_nononlocal (insn);
32214 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
32215 insn = emit_move_insn (lr, r0);
32216 add_reg_note (insn, REG_CFA_RESTORE, lr);
32217 RTX_FRAME_RELATED_P (insn) = 1;
32218 emit_insn (gen_split_stack_return ());
32220 emit_label (ok_label);
32221 LABEL_NUSES (ok_label) = 1;
32224 /* Return the internal arg pointer used for function incoming
32225 arguments. When -fsplit-stack, the arg pointer is r12 so we need
32226 to copy it to a pseudo in order for it to be preserved over calls
32227 and suchlike. We'd really like to use a pseudo here for the
32228 internal arg pointer but data-flow analysis is not prepared to
32229 accept pseudos as live at the beginning of a function. */
32231 static rtx
32232 rs6000_internal_arg_pointer (void)
32234 if (flag_split_stack
32235 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
32236 == NULL))
32239 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
32241 rtx pat;
32243 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
32244 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
32246 /* Put the pseudo initialization right after the note at the
32247 beginning of the function. */
32248 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
32249 gen_rtx_REG (Pmode, 12));
32250 push_topmost_sequence ();
32251 emit_insn_after (pat, get_insns ());
32252 pop_topmost_sequence ();
32254 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
32255 FIRST_PARM_OFFSET (current_function_decl));
32257 return virtual_incoming_args_rtx;
32260 /* We may have to tell the dataflow pass that the split stack prologue
32261 is initializing a register. */
32263 static void
32264 rs6000_live_on_entry (bitmap regs)
32266 if (flag_split_stack)
32267 bitmap_set_bit (regs, 12);
32270 /* Emit -fsplit-stack dynamic stack allocation space check. */
32272 void
32273 rs6000_split_stack_space_check (rtx size, rtx label)
32275 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32276 rtx limit = gen_reg_rtx (Pmode);
32277 rtx requested = gen_reg_rtx (Pmode);
32278 rtx cmp = gen_reg_rtx (CCUNSmode);
32279 rtx jump;
32281 emit_insn (gen_load_split_stack_limit (limit));
32282 if (CONST_INT_P (size))
32283 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
32284 else
32286 size = force_reg (Pmode, size);
32287 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
32289 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
32290 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
32291 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
32292 gen_rtx_LABEL_REF (VOIDmode, label),
32293 pc_rtx);
32294 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
32295 JUMP_LABEL (jump) = label;
32298 /* A C compound statement that outputs the assembler code for a thunk
32299 function, used to implement C++ virtual function calls with
32300 multiple inheritance. The thunk acts as a wrapper around a virtual
32301 function, adjusting the implicit object parameter before handing
32302 control off to the real function.
32304 First, emit code to add the integer DELTA to the location that
32305 contains the incoming first argument. Assume that this argument
32306 contains a pointer, and is the one used to pass the `this' pointer
32307 in C++. This is the incoming argument *before* the function
32308 prologue, e.g. `%o0' on a sparc. The addition must preserve the
32309 values of all other incoming arguments.
32311 After the addition, emit code to jump to FUNCTION, which is a
32312 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
32313 not touch the return address. Hence returning from FUNCTION will
32314 return to whoever called the current `thunk'.
32316 The effect must be as if FUNCTION had been called directly with the
32317 adjusted first argument. This macro is responsible for emitting
32318 all of the code for a thunk function; output_function_prologue()
32319 and output_function_epilogue() are not invoked.
32321 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
32322 been extracted from it.) It might possibly be useful on some
32323 targets, but probably not.
32325 If you do not define this macro, the target-independent code in the
32326 C++ frontend will generate a less efficient heavyweight thunk that
32327 calls FUNCTION instead of jumping to it. The generic approach does
32328 not support varargs. */
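/* As a concrete sketch, for delta == -8 and no vcall offset the
   thunk body built below boils down to:

       addi 3,3,-8		; adjust the `this' pointer in r3
       b    <function>		; sibling call; LR still holds our caller

   (r4 is adjusted instead when the callee returns an aggregate in
   memory, since r3 then carries the return-slot pointer).  */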
32330 static void
32331 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
32332 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
32333 tree function)
32335 rtx this_rtx, funexp;
32336 rtx_insn *insn;
32338 reload_completed = 1;
32339 epilogue_completed = 1;
32341 /* Mark the end of the (empty) prologue. */
32342 emit_note (NOTE_INSN_PROLOGUE_END);
32344 /* Find the "this" pointer. If the function returns a structure,
32345 the structure return pointer is in r3. */
32346 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
32347 this_rtx = gen_rtx_REG (Pmode, 4);
32348 else
32349 this_rtx = gen_rtx_REG (Pmode, 3);
32351 /* Apply the constant offset, if required. */
32352 if (delta)
32353 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
32355 /* Apply the offset from the vtable, if required. */
32356 if (vcall_offset)
32358 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
32359 rtx tmp = gen_rtx_REG (Pmode, 12);
32361 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
32362 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
32364 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
32365 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
32367 else
32369 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
32371 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
32373 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
32376 /* Generate a tail call to the target function. */
32377 if (!TREE_USED (function))
32379 assemble_external (function);
32380 TREE_USED (function) = 1;
32382 funexp = XEXP (DECL_RTL (function), 0);
32383 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
32385 #if TARGET_MACHO
32386 if (MACHOPIC_INDIRECT)
32387 funexp = machopic_indirect_call_target (funexp);
32388 #endif
32390 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
32391 generate sibcall RTL explicitly. */
32392 insn = emit_call_insn (
32393 gen_rtx_PARALLEL (VOIDmode,
32394 gen_rtvec (3,
32395 gen_rtx_CALL (VOIDmode,
32396 funexp, const0_rtx),
32397 gen_rtx_USE (VOIDmode, const0_rtx),
32398 simple_return_rtx)));
32399 SIBLING_CALL_P (insn) = 1;
32400 emit_barrier ();
32402 /* Run just enough of rest_of_compilation to get the insns emitted.
32403 There's not really enough bulk here to make other passes such as
32404 instruction scheduling worthwhile. Note that use_thunk calls
32405 assemble_start_function and assemble_end_function. */
32406 insn = get_insns ();
32407 shorten_branches (insn);
32408 final_start_function (insn, file, 1);
32409 final (insn, file, 1);
32410 final_end_function ();
32412 reload_completed = 0;
32413 epilogue_completed = 0;
32416 /* A quick summary of the various types of 'constant-pool tables'
32417 under PowerPC:
32419 Target     Flags                Name             One table per
32420 AIX        (none)               AIX TOC          object file
32421 AIX        -mfull-toc           AIX TOC          object file
32422 AIX        -mminimal-toc        AIX minimal TOC  translation unit
32423 SVR4/EABI  (none)               SVR4 SDATA       object file
32424 SVR4/EABI  -fpic                SVR4 pic         object file
32425 SVR4/EABI  -fPIC                SVR4 PIC         translation unit
32426 SVR4/EABI  -mrelocatable        EABI TOC         function
32427 SVR4/EABI  -maix                AIX TOC          object file
32428 SVR4/EABI  -maix -mminimal-toc  AIX minimal TOC  translation unit
32431 Name             Reg.  Reg. set by  Entries made by  addrs?  fp?      sum?
32434 AIX TOC           2    crt0         as               Y       option   option
32435 AIX minimal TOC  30    prolog       gcc              Y       Y        option
32436 SVR4 SDATA       13    crt0         gcc              N       Y        N
32437 SVR4 pic         30    prolog       ld               Y       not yet  N
32438 SVR4 PIC         30    prolog       gcc              Y       option   option
32439 EABI TOC         30    prolog       gcc              Y       option   option
32443 /* Hash functions for the hash table. */
32445 static unsigned
32446 rs6000_hash_constant (rtx k)
32448 enum rtx_code code = GET_CODE (k);
32449 machine_mode mode = GET_MODE (k);
32450 unsigned result = (code << 3) ^ mode;
32451 const char *format;
32452 int flen, fidx;
32454 format = GET_RTX_FORMAT (code);
32455 flen = strlen (format);
32456 fidx = 0;
32458 switch (code)
32460 case LABEL_REF:
32461 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
32463 case CONST_WIDE_INT:
32465 int i;
32466 flen = CONST_WIDE_INT_NUNITS (k);
32467 for (i = 0; i < flen; i++)
32468 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
32469 return result;
32472 case CONST_DOUBLE:
32473 if (mode != VOIDmode)
32474 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
32475 flen = 2;
32476 break;
32478 case CODE_LABEL:
32479 fidx = 3;
32480 break;
32482 default:
32483 break;
32486 for (; fidx < flen; fidx++)
32487 switch (format[fidx])
32489 case 's':
32491 unsigned i, len;
32492 const char *str = XSTR (k, fidx);
32493 len = strlen (str);
32494 result = result * 613 + len;
32495 for (i = 0; i < len; i++)
32496 result = result * 613 + (unsigned) str[i];
32497 break;
32499 case 'u':
32500 case 'e':
32501 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
32502 break;
32503 case 'i':
32504 case 'n':
32505 result = result * 613 + (unsigned) XINT (k, fidx);
32506 break;
32507 case 'w':
32508 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
32509 result = result * 613 + (unsigned) XWINT (k, fidx);
32510 else
32512 size_t i;
32513 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
32514 result = result * 613 + (unsigned) (XWINT (k, fidx)
32515 >> CHAR_BIT * i);
32517 break;
32518 case '0':
32519 break;
32520 default:
32521 gcc_unreachable ();
32524 return result;
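/* Illustrative sketch (hypothetical, not used by GCC): the hash above
   folds each RTX field into RESULT with the odd multipliers 613 and
   1231.  The same recurrence applied to a plain string key would be:

     static unsigned
     example_multiplicative_hash (const char *s)
     {
       unsigned result = 0;
       while (*s)
         result = result * 613 + (unsigned char) *s++;
       return result;
     }

   which mirrors the 's' format case in the loop above.  */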
32527 hashval_t
32528 toc_hasher::hash (toc_hash_struct *thc)
32530 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
32533 /* Compare H1 and H2 for equivalence. */
32535 bool
32536 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
32538 rtx r1 = h1->key;
32539 rtx r2 = h2->key;
32541 if (h1->key_mode != h2->key_mode)
32542 return 0;
32544 return rtx_equal_p (r1, r2);
32547 /* These are the names given by the C++ front-end to vtables, and
32548 vtable-like objects. Ideally, this logic should not be here;
32549 instead, there should be some programmatic way of inquiring as
32550 to whether or not an object is a vtable. */
32552 #define VTABLE_NAME_P(NAME) \
32553 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
32554 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
32555 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
32556 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
32557 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
32559 #ifdef NO_DOLLAR_IN_LABEL
32560 /* Return a GGC-allocated character string translating dollar signs in
32561 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
32563 const char *
32564 rs6000_xcoff_strip_dollar (const char *name)
32566 char *strip, *p;
32567 const char *q;
32568 size_t len;
32570 q = (const char *) strchr (name, '$');
32572 if (q == 0 || q == name)
32573 return name;
32575 len = strlen (name);
32576 strip = XALLOCAVEC (char, len + 1);
32577 strcpy (strip, name);
32578 p = strip + (q - name);
32579 while (p)
32581 *p = '_';
32582 p = strchr (p + 1, '$');
32585 return ggc_alloc_string (strip, len);
32587 #endif
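/* For example, rs6000_xcoff_strip_dollar maps a hypothetical input
   "foo$bar$1" to "foo_bar_1"; a name containing no '$', or one whose
   first character is '$' (the q == 0 || q == name test above), is
   returned unchanged.  */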
32589 void
32590 rs6000_output_symbol_ref (FILE *file, rtx x)
32592 const char *name = XSTR (x, 0);
32594 /* Currently C++ toc references to vtables can be emitted before it
32595 is decided whether the vtable is public or private. If this is
32596 the case, then the linker will eventually complain that there is
32597 a reference to an unknown section. Thus, for vtables only,
32598 we emit the TOC reference to reference the identifier and not the
32599 symbol. */
32600 if (VTABLE_NAME_P (name))
32602 RS6000_OUTPUT_BASENAME (file, name);
32604 else
32605 assemble_name (file, name);
32608 /* Output a TOC entry. We derive the entry name from what is being
32609 written. */
32611 void
32612 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
32614 char buf[256];
32615 const char *name = buf;
32616 rtx base = x;
32617 HOST_WIDE_INT offset = 0;
32619 gcc_assert (!TARGET_NO_TOC);
32621 /* When the linker won't eliminate them, don't output duplicate
32622 TOC entries (this happens on AIX if there is any kind of TOC,
32623 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
32624 CODE_LABELs. */
32625 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
32627 struct toc_hash_struct *h;
32629 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
32630 time because GGC is not initialized at that point. */
32631 if (toc_hash_table == NULL)
32632 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
32634 h = ggc_alloc<toc_hash_struct> ();
32635 h->key = x;
32636 h->key_mode = mode;
32637 h->labelno = labelno;
32639 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
32640 if (*found == NULL)
32641 *found = h;
32642 else /* This is indeed a duplicate.
32643 Set this label equal to that label. */
32645 fputs ("\t.set ", file);
32646 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
32647 fprintf (file, "%d,", labelno);
32648 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
32649 fprintf (file, "%d\n", ((*found)->labelno));
32651 #ifdef HAVE_AS_TLS
32652 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
32653 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
32654 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
32656 fputs ("\t.set ", file);
32657 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
32658 fprintf (file, "%d,", labelno);
32659 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
32660 fprintf (file, "%d\n", ((*found)->labelno));
32662 #endif
32663 return;
32667 /* If we're going to put a double constant in the TOC, make sure it's
32668 aligned properly when strict alignment is on. */
32669 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
32670 && STRICT_ALIGNMENT
32671 && GET_MODE_BITSIZE (mode) >= 64
32672 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
32673 ASM_OUTPUT_ALIGN (file, 3);
32676 (*targetm.asm_out.internal_label) (file, "LC", labelno);
32678 /* Handle FP constants specially. Note that if we have a minimal
32679 TOC, things we put here aren't actually in the TOC, so we can allow
32680 FP constants. */
32681 if (GET_CODE (x) == CONST_DOUBLE &&
32682 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
32683 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
32685 long k[4];
32687 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32688 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
32689 else
32690 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
32692 if (TARGET_64BIT)
32694 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32695 fputs (DOUBLE_INT_ASM_OP, file);
32696 else
32697 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
32698 k[0] & 0xffffffff, k[1] & 0xffffffff,
32699 k[2] & 0xffffffff, k[3] & 0xffffffff);
32700 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
32701 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
32702 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
32703 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
32704 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
32705 return;
32707 else
32709 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32710 fputs ("\t.long ", file);
32711 else
32712 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
32713 k[0] & 0xffffffff, k[1] & 0xffffffff,
32714 k[2] & 0xffffffff, k[3] & 0xffffffff);
32715 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
32716 k[0] & 0xffffffff, k[1] & 0xffffffff,
32717 k[2] & 0xffffffff, k[3] & 0xffffffff);
32718 return;
32721 else if (GET_CODE (x) == CONST_DOUBLE &&
32722 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
32724 long k[2];
32726 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32727 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
32728 else
32729 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
32731 if (TARGET_64BIT)
32733 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32734 fputs (DOUBLE_INT_ASM_OP, file);
32735 else
32736 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
32737 k[0] & 0xffffffff, k[1] & 0xffffffff);
32738 fprintf (file, "0x%lx%08lx\n",
32739 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
32740 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
32741 return;
32743 else
32745 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32746 fputs ("\t.long ", file);
32747 else
32748 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
32749 k[0] & 0xffffffff, k[1] & 0xffffffff);
32750 fprintf (file, "0x%lx,0x%lx\n",
32751 k[0] & 0xffffffff, k[1] & 0xffffffff);
32752 return;
32755 else if (GET_CODE (x) == CONST_DOUBLE &&
32756 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
32758 long l;
32760 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
32761 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
32762 else
32763 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
32765 if (TARGET_64BIT)
32767 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32768 fputs (DOUBLE_INT_ASM_OP, file);
32769 else
32770 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
32771 if (WORDS_BIG_ENDIAN)
32772 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
32773 else
32774 fprintf (file, "0x%lx\n", l & 0xffffffff);
32775 return;
32777 else
32779 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32780 fputs ("\t.long ", file);
32781 else
32782 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
32783 fprintf (file, "0x%lx\n", l & 0xffffffff);
32784 return;
32787 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
32789 unsigned HOST_WIDE_INT low;
32790 HOST_WIDE_INT high;
32792 low = INTVAL (x) & 0xffffffff;
32793 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
32795 /* TOC entries are always Pmode-sized, so when big-endian
32796 smaller integer constants in the TOC need to be padded.
32797 (This is still a win over putting the constants in
32798 a separate constant pool, because then we'd have
32799 to have both a TOC entry _and_ the actual constant.)
32801 For a 32-bit target, CONST_INT values are loaded and shifted
32802 entirely within `low' and can be stored in one TOC entry. */
32804 /* It would be easy to make this work, but it doesn't now. */
32805 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
32807 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
32809 low |= high << 32;
32810 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
32811 high = (HOST_WIDE_INT) low >> 32;
32812 low &= 0xffffffff;
32815 if (TARGET_64BIT)
32817 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32818 fputs (DOUBLE_INT_ASM_OP, file);
32819 else
32820 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
32821 (long) high & 0xffffffff, (long) low & 0xffffffff);
32822 fprintf (file, "0x%lx%08lx\n",
32823 (long) high & 0xffffffff, (long) low & 0xffffffff);
32824 return;
32826 else
32828 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
32830 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32831 fputs ("\t.long ", file);
32832 else
32833 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
32834 (long) high & 0xffffffff, (long) low & 0xffffffff);
32835 fprintf (file, "0x%lx,0x%lx\n",
32836 (long) high & 0xffffffff, (long) low & 0xffffffff);
32838 else
32840 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32841 fputs ("\t.long ", file);
32842 else
32843 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
32844 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
32846 return;
32850 if (GET_CODE (x) == CONST)
32852 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
32853 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
32855 base = XEXP (XEXP (x, 0), 0);
32856 offset = INTVAL (XEXP (XEXP (x, 0), 1));
32859 switch (GET_CODE (base))
32861 case SYMBOL_REF:
32862 name = XSTR (base, 0);
32863 break;
32865 case LABEL_REF:
32866 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
32867 CODE_LABEL_NUMBER (XEXP (base, 0)));
32868 break;
32870 case CODE_LABEL:
32871 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
32872 break;
32874 default:
32875 gcc_unreachable ();
32878 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32879 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
32880 else
32882 fputs ("\t.tc ", file);
32883 RS6000_OUTPUT_BASENAME (file, name);
32885 if (offset < 0)
32886 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
32887 else if (offset)
32888 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
32890 /* Mark large TOC symbols on AIX with [TE] so they are mapped
32891 after other TOC symbols, reducing overflow of small TOC access
32892 to [TC] symbols. */
32893 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
32894 ? "[TE]," : "[TC],", file);
32897 /* Currently C++ toc references to vtables can be emitted before it
32898 is decided whether the vtable is public or private. If this is
32899 the case, then the linker will eventually complain that there is
32900 a TOC reference to an unknown section. Thus, for vtables only,
32901 we emit the TOC reference to reference the symbol and not the
32902 section. */
32903 if (VTABLE_NAME_P (name))
32905 RS6000_OUTPUT_BASENAME (file, name);
32906 if (offset < 0)
32907 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
32908 else if (offset > 0)
32909 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
32911 else
32912 output_addr_const (file, x);
32914 #if HAVE_AS_TLS
32915 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
32917 switch (SYMBOL_REF_TLS_MODEL (base))
32919 case 0:
32920 break;
32921 case TLS_MODEL_LOCAL_EXEC:
32922 fputs ("@le", file);
32923 break;
32924 case TLS_MODEL_INITIAL_EXEC:
32925 fputs ("@ie", file);
32926 break;
32927 /* Use global-dynamic for local-dynamic. */
32928 case TLS_MODEL_GLOBAL_DYNAMIC:
32929 case TLS_MODEL_LOCAL_DYNAMIC:
32930 putc ('\n', file);
32931 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
32932 fputs ("\t.tc .", file);
32933 RS6000_OUTPUT_BASENAME (file, name);
32934 fputs ("[TC],", file);
32935 output_addr_const (file, x);
32936 fputs ("@m", file);
32937 break;
32938 default:
32939 gcc_unreachable ();
32942 #endif
32944 putc ('\n', file);
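/* Worked example for the big-endian padding above (assuming a 64-bit
   big-endian ELF target, so POINTER_SIZE == 64): an SImode CONST_INT
   0x12345678 starts as low = 0x12345678, high = 0; shifting left by
   POINTER_SIZE - 32 gives low = 0x1234567800000000, which splits back
   into high = 0x12345678, low = 0, so the value is emitted in the
   most-significant half of the Pmode-sized TOC entry.  */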
32947 /* Output an assembler pseudo-op to write an ASCII string of N characters
32948 starting at P to FILE.
32950 On the RS/6000, we have to do this using the .byte operation and
32951 write out special characters outside the quoted string.
32952 Also, the assembler is broken; very long strings are truncated,
32953 so we must artificially break them up early. */
32955 void
32956 output_ascii (FILE *file, const char *p, int n)
32958 char c;
32959 int i, count_string;
32960 const char *for_string = "\t.byte \"";
32961 const char *for_decimal = "\t.byte ";
32962 const char *to_close = NULL;
32964 count_string = 0;
32965 for (i = 0; i < n; i++)
32967 c = *p++;
32968 if (c >= ' ' && c < 0177)
32970 if (for_string)
32971 fputs (for_string, file);
32972 putc (c, file);
32974 /* Write two quotes to get one. */
32975 if (c == '"')
32977 putc (c, file);
32978 ++count_string;
32981 for_string = NULL;
32982 for_decimal = "\"\n\t.byte ";
32983 to_close = "\"\n";
32984 ++count_string;
32986 if (count_string >= 512)
32988 fputs (to_close, file);
32990 for_string = "\t.byte \"";
32991 for_decimal = "\t.byte ";
32992 to_close = NULL;
32993 count_string = 0;
32996 else
32998 if (for_decimal)
32999 fputs (for_decimal, file);
33000 fprintf (file, "%d", c);
33002 for_string = "\n\t.byte \"";
33003 for_decimal = ", ";
33004 to_close = "\n";
33005 count_string = 0;
33009 /* Now close the string if we have written one. Then end the line. */
33010 if (to_close)
33011 fputs (to_close, file);
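/* For example, output_ascii (file, "Hi\n", 3) emits the printable
   characters inside one quoted string and the newline as a decimal
   byte:

	.byte "Hi"
	.byte 10
*/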
33014 /* Generate a unique section name for FILENAME for a section type
33015 represented by SECTION_DESC. Output goes into BUF.
33017 SECTION_DESC can be any string, as long as it is different for each
33018 possible section type.
33020 We name the section in the same manner as xlc. The name begins with an
33021 underscore followed by the filename (after stripping any leading directory
33022 names) with the last period replaced by the string SECTION_DESC. If
33023 FILENAME does not contain a period, SECTION_DESC is appended to the end of
33024 the name. */
33026 void
33027 rs6000_gen_section_name (char **buf, const char *filename,
33028 const char *section_desc)
33030 const char *q, *after_last_slash, *last_period = 0;
33031 char *p;
33032 int len;
33034 after_last_slash = filename;
33035 for (q = filename; *q; q++)
33037 if (*q == '/')
33038 after_last_slash = q + 1;
33039 else if (*q == '.')
33040 last_period = q;
33043 len = strlen (after_last_slash) + strlen (section_desc) + 2;
33044 *buf = (char *) xmalloc (len);
33046 p = *buf;
33047 *p++ = '_';
33049 for (q = after_last_slash; *q; q++)
33051 if (q == last_period)
33053 strcpy (p, section_desc);
33054 p += strlen (section_desc);
33055 break;
33058 else if (ISALNUM (*q))
33059 *p++ = *q;
33062 if (last_period == 0)
33063 strcpy (p, section_desc);
33064 else
33065 *p = '\0';
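/* For example, a hypothetical FILENAME "src/vec-utils.c" with
   SECTION_DESC "RO" yields "_vecutilsRO": the directory prefix is
   stripped, non-alphanumeric characters are dropped, and the last
   period is replaced by SECTION_DESC.  */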
33068 /* Emit profile function. */
33070 void
33071 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
33073 /* Non-standard profiling for kernels, which just saves LR then calls
33074 _mcount without worrying about arg saves. The idea is to change
33075 the function prologue as little as possible as it isn't easy to
33076 account for arg save/restore code added just for _mcount. */
33077 if (TARGET_PROFILE_KERNEL)
33078 return;
33080 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33082 #ifndef NO_PROFILE_COUNTERS
33083 # define NO_PROFILE_COUNTERS 0
33084 #endif
33085 if (NO_PROFILE_COUNTERS)
33086 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
33087 LCT_NORMAL, VOIDmode, 0);
33088 else
33090 char buf[30];
33091 const char *label_name;
33092 rtx fun;
33094 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
33095 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
33096 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
33098 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
33099 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
33102 else if (DEFAULT_ABI == ABI_DARWIN)
33104 const char *mcount_name = RS6000_MCOUNT;
33105 int caller_addr_regno = LR_REGNO;
33107 /* Be conservative and always set this, at least for now. */
33108 crtl->uses_pic_offset_table = 1;
33110 #if TARGET_MACHO
33111 /* For PIC code, set up a stub and collect the caller's address
33112 from r0, which is where the prologue puts it. */
33113 if (MACHOPIC_INDIRECT
33114 && crtl->uses_pic_offset_table)
33115 caller_addr_regno = 0;
33116 #endif
33117 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
33118 LCT_NORMAL, VOIDmode, 1,
33119 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
33123 /* Write function profiler code. */
33125 void
33126 output_function_profiler (FILE *file, int labelno)
33128 char buf[100];
33130 switch (DEFAULT_ABI)
33132 default:
33133 gcc_unreachable ();
33135 case ABI_V4:
33136 if (!TARGET_32BIT)
33138 warning (0, "no profiling of 64-bit code for this ABI");
33139 return;
33141 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
33142 fprintf (file, "\tmflr %s\n", reg_names[0]);
33143 if (NO_PROFILE_COUNTERS)
33145 asm_fprintf (file, "\tstw %s,4(%s)\n",
33146 reg_names[0], reg_names[1]);
33148 else if (TARGET_SECURE_PLT && flag_pic)
33150 if (TARGET_LINK_STACK)
33152 char name[32];
33153 get_ppc476_thunk_name (name);
33154 asm_fprintf (file, "\tbl %s\n", name);
33156 else
33157 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
33158 asm_fprintf (file, "\tstw %s,4(%s)\n",
33159 reg_names[0], reg_names[1]);
33160 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
33161 asm_fprintf (file, "\taddis %s,%s,",
33162 reg_names[12], reg_names[12]);
33163 assemble_name (file, buf);
33164 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
33165 assemble_name (file, buf);
33166 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
33168 else if (flag_pic == 1)
33170 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
33171 asm_fprintf (file, "\tstw %s,4(%s)\n",
33172 reg_names[0], reg_names[1]);
33173 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
33174 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
33175 assemble_name (file, buf);
33176 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
33178 else if (flag_pic > 1)
33180 asm_fprintf (file, "\tstw %s,4(%s)\n",
33181 reg_names[0], reg_names[1]);
33182 /* Now, we need to get the address of the label. */
33183 if (TARGET_LINK_STACK)
33185 char name[32];
33186 get_ppc476_thunk_name (name);
33187 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
33188 assemble_name (file, buf);
33189 fputs ("-.\n1:", file);
33190 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
33191 asm_fprintf (file, "\taddi %s,%s,4\n",
33192 reg_names[11], reg_names[11]);
33194 else
33196 fputs ("\tbcl 20,31,1f\n\t.long ", file);
33197 assemble_name (file, buf);
33198 fputs ("-.\n1:", file);
33199 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
33201 asm_fprintf (file, "\tlwz %s,0(%s)\n",
33202 reg_names[0], reg_names[11]);
33203 asm_fprintf (file, "\tadd %s,%s,%s\n",
33204 reg_names[0], reg_names[0], reg_names[11]);
33206 else
33208 asm_fprintf (file, "\tlis %s,", reg_names[12]);
33209 assemble_name (file, buf);
33210 fputs ("@ha\n", file);
33211 asm_fprintf (file, "\tstw %s,4(%s)\n",
33212 reg_names[0], reg_names[1]);
33213 asm_fprintf (file, "\tla %s,", reg_names[0]);
33214 assemble_name (file, buf);
33215 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
33218 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
33219 fprintf (file, "\tbl %s%s\n",
33220 RS6000_MCOUNT, flag_pic ? "@plt" : "");
33221 break;
33223 case ABI_AIX:
33224 case ABI_ELFv2:
33225 case ABI_DARWIN:
33226 /* Don't do anything, done in output_profile_hook (). */
33227 break;
33233 /* The following variable holds the last issued insn. */
33235 static rtx_insn *last_scheduled_insn;
33237 /* The following variable helps to balance issuing of load and
33238 store instructions. */
33240 static int load_store_pendulum;
33242 /* The following variable helps pair divide insns during scheduling. */
33243 static int divide_cnt;
33244 /* The following variable helps pair and alternate vector and vector load
33245 insns during scheduling. */
33246 static int vec_pairing;
33249 /* Power4 load update and store update instructions are cracked into a
33250 load or store and an integer insn which are executed in the same cycle.
33251 Branches have their own dispatch slot which does not count against the
33252 GCC issue rate, but it changes the program flow so there are no other
33253 instructions to issue in this cycle. */
33255 static int
33256 rs6000_variable_issue_1 (rtx_insn *insn, int more)
33258 last_scheduled_insn = insn;
33259 if (GET_CODE (PATTERN (insn)) == USE
33260 || GET_CODE (PATTERN (insn)) == CLOBBER)
33262 cached_can_issue_more = more;
33263 return cached_can_issue_more;
33266 if (insn_terminates_group_p (insn, current_group))
33268 cached_can_issue_more = 0;
33269 return cached_can_issue_more;
33272 /* If the insn has no reservation but we reach here, leave MORE unchanged. */
33273 if (recog_memoized (insn) < 0)
33274 return more;
33276 if (rs6000_sched_groups)
33278 if (is_microcoded_insn (insn))
33279 cached_can_issue_more = 0;
33280 else if (is_cracked_insn (insn))
33281 cached_can_issue_more = more > 2 ? more - 2 : 0;
33282 else
33283 cached_can_issue_more = more - 1;
33285 return cached_can_issue_more;
33288 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
33289 return 0;
33291 cached_can_issue_more = more - 1;
33292 return cached_can_issue_more;
33295 static int
33296 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
33298 int r = rs6000_variable_issue_1 (insn, more);
33299 if (verbose)
33300 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
33301 return r;
33304 /* Adjust the cost of a scheduling dependency. Return the new cost of
33305 the dependency of type DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
33307 static int
33308 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
33309 unsigned int)
33311 enum attr_type attr_type;
33313 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
33314 return cost;
33316 switch (dep_type)
33318 case REG_DEP_TRUE:
33320 /* Data dependency; DEP_INSN writes a register that INSN reads
33321 some cycles later. */
33323 /* Separate a load from a narrower, dependent store. */
33324 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
33325 && GET_CODE (PATTERN (insn)) == SET
33326 && GET_CODE (PATTERN (dep_insn)) == SET
33327 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
33328 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
33329 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
33330 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
33331 return cost + 14;
33333 attr_type = get_attr_type (insn);
33335 switch (attr_type)
33337 case TYPE_JMPREG:
33338 /* Tell the first scheduling pass about the latency between
33339 a mtctr and bctr (and mtlr and br/blr). The first
33340 scheduling pass will not know about this latency since
33341 the mtctr instruction, which has the latency associated
33342 to it, will be generated by reload. */
33343 return 4;
33344 case TYPE_BRANCH:
33345 /* Leave some extra cycles between a compare and its
33346 dependent branch, to inhibit expensive mispredicts. */
33347 if ((rs6000_cpu_attr == CPU_PPC603
33348 || rs6000_cpu_attr == CPU_PPC604
33349 || rs6000_cpu_attr == CPU_PPC604E
33350 || rs6000_cpu_attr == CPU_PPC620
33351 || rs6000_cpu_attr == CPU_PPC630
33352 || rs6000_cpu_attr == CPU_PPC750
33353 || rs6000_cpu_attr == CPU_PPC7400
33354 || rs6000_cpu_attr == CPU_PPC7450
33355 || rs6000_cpu_attr == CPU_PPCE5500
33356 || rs6000_cpu_attr == CPU_PPCE6500
33357 || rs6000_cpu_attr == CPU_POWER4
33358 || rs6000_cpu_attr == CPU_POWER5
33359 || rs6000_cpu_attr == CPU_POWER7
33360 || rs6000_cpu_attr == CPU_POWER8
33361 || rs6000_cpu_attr == CPU_POWER9
33362 || rs6000_cpu_attr == CPU_CELL)
33363 && recog_memoized (dep_insn)
33364 && (INSN_CODE (dep_insn) >= 0))
33366 switch (get_attr_type (dep_insn))
33368 case TYPE_CMP:
33369 case TYPE_FPCOMPARE:
33370 case TYPE_CR_LOGICAL:
33371 case TYPE_DELAYED_CR:
33372 return cost + 2;
33373 case TYPE_EXTS:
33374 case TYPE_MUL:
33375 if (get_attr_dot (dep_insn) == DOT_YES)
33376 return cost + 2;
33377 else
33378 break;
33379 case TYPE_SHIFT:
33380 if (get_attr_dot (dep_insn) == DOT_YES
33381 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
33382 return cost + 2;
33383 else
33384 break;
33385 default:
33386 break;
33388 break;
33390 case TYPE_STORE:
33391 case TYPE_FPSTORE:
33392 if ((rs6000_cpu == PROCESSOR_POWER6)
33393 && recog_memoized (dep_insn)
33394 && (INSN_CODE (dep_insn) >= 0))
33397 if (GET_CODE (PATTERN (insn)) != SET)
33398 /* If this happens, we have to extend this to schedule
33399 optimally. Return default for now. */
33400 return cost;
33402 /* Adjust the cost for the case where the value written
33403 by a fixed point operation is used as the address
33404 gen value on a store. */
33405 switch (get_attr_type (dep_insn))
33407 case TYPE_LOAD:
33408 case TYPE_CNTLZ:
33410 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33411 return get_attr_sign_extend (dep_insn)
33412 == SIGN_EXTEND_YES ? 6 : 4;
33413 break;
33415 case TYPE_SHIFT:
33417 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33418 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
33419 6 : 3;
33420 break;
33422 case TYPE_INTEGER:
33423 case TYPE_ADD:
33424 case TYPE_LOGICAL:
33425 case TYPE_EXTS:
33426 case TYPE_INSERT:
33428 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33429 return 3;
33430 break;
33432 case TYPE_STORE:
33433 case TYPE_FPLOAD:
33434 case TYPE_FPSTORE:
33436 if (get_attr_update (dep_insn) == UPDATE_YES
33437 && ! rs6000_store_data_bypass_p (dep_insn, insn))
33438 return 3;
33439 break;
33441 case TYPE_MUL:
33443 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33444 return 17;
33445 break;
33447 case TYPE_DIV:
33449 if (! rs6000_store_data_bypass_p (dep_insn, insn))
33450 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
33451 break;
33453 default:
33454 break;
33457 break;
33459 case TYPE_LOAD:
33460 if ((rs6000_cpu == PROCESSOR_POWER6)
33461 && recog_memoized (dep_insn)
33462 && (INSN_CODE (dep_insn) >= 0))
33465 /* Adjust the cost for the case where the value written
33466 by a fixed point instruction is used within the address
33467 gen portion of a subsequent load(u)(x) */
33468 switch (get_attr_type (dep_insn))
33470 case TYPE_LOAD:
33471 case TYPE_CNTLZ:
33473 if (set_to_load_agen (dep_insn, insn))
33474 return get_attr_sign_extend (dep_insn)
33475 == SIGN_EXTEND_YES ? 6 : 4;
33476 break;
33478 case TYPE_SHIFT:
33480 if (set_to_load_agen (dep_insn, insn))
33481 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
33482 6 : 3;
33483 break;
33485 case TYPE_INTEGER:
33486 case TYPE_ADD:
33487 case TYPE_LOGICAL:
33488 case TYPE_EXTS:
33489 case TYPE_INSERT:
33491 if (set_to_load_agen (dep_insn, insn))
33492 return 3;
33493 break;
33495 case TYPE_STORE:
33496 case TYPE_FPLOAD:
33497 case TYPE_FPSTORE:
33499 if (get_attr_update (dep_insn) == UPDATE_YES
33500 && set_to_load_agen (dep_insn, insn))
33501 return 3;
33502 break;
33504 case TYPE_MUL:
33506 if (set_to_load_agen (dep_insn, insn))
33507 return 17;
33508 break;
33510 case TYPE_DIV:
33512 if (set_to_load_agen (dep_insn, insn))
33513 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
33514 break;
33516 default:
33517 break;
33520 break;
33522 case TYPE_FPLOAD:
33523 if ((rs6000_cpu == PROCESSOR_POWER6)
33524 && get_attr_update (insn) == UPDATE_NO
33525 && recog_memoized (dep_insn)
33526 && (INSN_CODE (dep_insn) >= 0)
33527 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
33528 return 2;
33530 default:
33531 break;
33534 /* Fall out to return default cost. */
33536 break;
33538 case REG_DEP_OUTPUT:
33539 /* Output dependency; DEP_INSN writes a register that INSN writes some
33540 cycles later. */
33541 if ((rs6000_cpu == PROCESSOR_POWER6)
33542 && recog_memoized (dep_insn)
33543 && (INSN_CODE (dep_insn) >= 0))
33545 attr_type = get_attr_type (insn);
33547 switch (attr_type)
33549 case TYPE_FP:
33550 case TYPE_FPSIMPLE:
33551 if (get_attr_type (dep_insn) == TYPE_FP
33552 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
33553 return 1;
33554 break;
33555 case TYPE_FPLOAD:
33556 if (get_attr_update (insn) == UPDATE_NO
33557 && get_attr_type (dep_insn) == TYPE_MFFGPR)
33558 return 2;
33559 break;
33560 default:
33561 break;
33564 /* Fall through, no cost for output dependency. */
33565 /* FALLTHRU */
33567 case REG_DEP_ANTI:
33568 /* Anti dependency; DEP_INSN reads a register that INSN writes some
33569 cycles later. */
33570 return 0;
33572 default:
33573 gcc_unreachable ();
33576 return cost;
33579 /* Debug version of rs6000_adjust_cost. */
33581 static int
33582 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
33583 int cost, unsigned int dw)
33585 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
33587 if (ret != cost)
33589 const char *dep;
33591 switch (dep_type)
33593 default: dep = "unknown dependency"; break;
33594 case REG_DEP_TRUE: dep = "data dependency"; break;
33595 case REG_DEP_OUTPUT: dep = "output dependency"; break;
33596 case REG_DEP_ANTI: dep = "anti dependency"; break;
33599 fprintf (stderr,
33600 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
33601 "%s, insn:\n", ret, cost, dep);
33603 debug_rtx (insn);
33606 return ret;
33609 /* The function returns true if INSN is microcoded.
33610 Return false otherwise. */
33612 static bool
33613 is_microcoded_insn (rtx_insn *insn)
33615 if (!insn || !NONDEBUG_INSN_P (insn)
33616 || GET_CODE (PATTERN (insn)) == USE
33617 || GET_CODE (PATTERN (insn)) == CLOBBER)
33618 return false;
33620 if (rs6000_cpu_attr == CPU_CELL)
33621 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
33623 if (rs6000_sched_groups
33624 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
33626 enum attr_type type = get_attr_type (insn);
33627 if ((type == TYPE_LOAD
33628 && get_attr_update (insn) == UPDATE_YES
33629 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
33630 || ((type == TYPE_LOAD || type == TYPE_STORE)
33631 && get_attr_update (insn) == UPDATE_YES
33632 && get_attr_indexed (insn) == INDEXED_YES)
33633 || type == TYPE_MFCR)
33634 return true;
33637 return false;
33640 /* The function returns true if INSN is cracked into 2 instructions
33641 by the processor (and therefore occupies 2 issue slots). */
33643 static bool
33644 is_cracked_insn (rtx_insn *insn)
33646 if (!insn || !NONDEBUG_INSN_P (insn)
33647 || GET_CODE (PATTERN (insn)) == USE
33648 || GET_CODE (PATTERN (insn)) == CLOBBER)
33649 return false;
33651 if (rs6000_sched_groups
33652 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
33654 enum attr_type type = get_attr_type (insn);
33655 if ((type == TYPE_LOAD
33656 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
33657 && get_attr_update (insn) == UPDATE_NO)
33658 || (type == TYPE_LOAD
33659 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
33660 && get_attr_update (insn) == UPDATE_YES
33661 && get_attr_indexed (insn) == INDEXED_NO)
33662 || (type == TYPE_STORE
33663 && get_attr_update (insn) == UPDATE_YES
33664 && get_attr_indexed (insn) == INDEXED_NO)
33665 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
33666 && get_attr_update (insn) == UPDATE_YES)
33667 || type == TYPE_DELAYED_CR
33668 || (type == TYPE_EXTS
33669 && get_attr_dot (insn) == DOT_YES)
33670 || (type == TYPE_SHIFT
33671 && get_attr_dot (insn) == DOT_YES
33672 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
33673 || (type == TYPE_MUL
33674 && get_attr_dot (insn) == DOT_YES)
33675 || type == TYPE_DIV
33676 || (type == TYPE_INSERT
33677 && get_attr_size (insn) == SIZE_32))
33678 return true;
33681 return false;
33684 /* The function returns true if INSN can be issued only from
33685 the branch slot. */
33687 static bool
33688 is_branch_slot_insn (rtx_insn *insn)
33690 if (!insn || !NONDEBUG_INSN_P (insn)
33691 || GET_CODE (PATTERN (insn)) == USE
33692 || GET_CODE (PATTERN (insn)) == CLOBBER)
33693 return false;
33695 if (rs6000_sched_groups)
33697 enum attr_type type = get_attr_type (insn);
33698 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
33699 return true;
33700 return false;
33703 return false;
33706 /* The function returns true if OUT_INSN sets a value that is
33707 used in the address generation computation of IN_INSN. */
33708 static bool
33709 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
33711 rtx out_set, in_set;
33713 /* For performance reasons, only handle the simple case where
33714 both loads are a single_set. */
33715 out_set = single_set (out_insn);
33716 if (out_set)
33718 in_set = single_set (in_insn);
33719 if (in_set)
33720 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
33723 return false;
33726 /* Try to determine base/offset/size parts of the given MEM.
33727 Return true if successful, false if the values cannot all
33728 be determined.
33730 This function only looks for REG or REG+CONST address forms.
33731 REG+REG address form will return false. */
33733 static bool
33734 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
33735 HOST_WIDE_INT *size)
33737 rtx addr_rtx;
33738 if (MEM_SIZE_KNOWN_P (mem))
33739 *size = MEM_SIZE (mem);
33740 else
33741 return false;
33743 addr_rtx = (XEXP (mem, 0));
33744 if (GET_CODE (addr_rtx) == PRE_MODIFY)
33745 addr_rtx = XEXP (addr_rtx, 1);
33747 *offset = 0;
33748 while (GET_CODE (addr_rtx) == PLUS
33749 && CONST_INT_P (XEXP (addr_rtx, 1)))
33751 *offset += INTVAL (XEXP (addr_rtx, 1));
33752 addr_rtx = XEXP (addr_rtx, 0);
33754 if (!REG_P (addr_rtx))
33755 return false;
33757 *base = addr_rtx;
33758 return true;
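/* Illustrative use (a sketch; use_parts is hypothetical): for a MEM
   such as (mem:DI (plus:DI (reg 9) (const_int 16))) with a known size,
   this yields BASE = (reg 9), OFFSET = 16, SIZE = 8:

     rtx base;
     HOST_WIDE_INT offset, size;
     if (get_memref_parts (mem, &base, &offset, &size))
       use_parts (base, offset, size);
*/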
33761 /* Return true if the target storage location of MEM1 is
33762 adjacent to the target storage location of MEM2. */
33765 static bool
33766 adjacent_mem_locations (rtx mem1, rtx mem2)
33768 rtx reg1, reg2;
33769 HOST_WIDE_INT off1, size1, off2, size2;
33771 if (get_memref_parts (mem1, &reg1, &off1, &size1)
33772 && get_memref_parts (mem2, &reg2, &off2, &size2))
33773 return ((REGNO (reg1) == REGNO (reg2))
33774 && ((off1 + size1 == off2)
33775 || (off2 + size2 == off1)));
33777 return false;
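/* For example, two stores off the same base register, one at offset 0
   with size 8 and one at offset 8 with size 8, satisfy
   off1 + size1 == off2 and so are adjacent; the second disjunct
   catches the opposite ordering.  */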
33780 /* This function returns true if it can be determined that the two MEM
33781 locations overlap by at least 1 byte based on base reg/offset/size. */
33783 static bool
33784 mem_locations_overlap (rtx mem1, rtx mem2)
33786 rtx reg1, reg2;
33787 HOST_WIDE_INT off1, size1, off2, size2;
33789 if (get_memref_parts (mem1, &reg1, &off1, &size1)
33790 && get_memref_parts (mem2, &reg2, &off2, &size2))
33791 return ((REGNO (reg1) == REGNO (reg2))
33792 && (((off1 <= off2) && (off1 + size1 > off2))
33793 || ((off2 <= off1) && (off2 + size2 > off1))));
33795 return false;
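/* For example, with a common base register the intervals
   [off1, off1 + size1) and [off2, off2 + size2) overlap when the
   lower-addressed access extends past the start of the other:
   off1 = 4, size1 = 8 overlaps off2 = 8, size2 = 4 (since 4 + 8 > 8),
   whereas off2 = 12 would be merely adjacent and is not reported.  */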
33798 /* A C statement (sans semicolon) to update the integer scheduling
33799 priority INSN_PRIORITY (INSN). Increase the priority to execute the
33800 INSN earlier; reduce the priority to execute INSN later. Do not
33801 define this macro if you do not need to adjust the scheduling
33802 priorities of insns. */
33804 static int
33805 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
33807 rtx load_mem, str_mem;
33808 /* On machines (like the 750) which have asymmetric integer units,
33809 where one integer unit can do multiply and divides and the other
33810 can't, reduce the priority of multiply/divide so it is scheduled
33811 before other integer operations. */
33813 #if 0
33814 if (! INSN_P (insn))
33815 return priority;
33817 if (GET_CODE (PATTERN (insn)) == USE)
33818 return priority;
33820 switch (rs6000_cpu_attr) {
33821 case CPU_PPC750:
33822 switch (get_attr_type (insn))
33824 default:
33825 break;
33827 case TYPE_MUL:
33828 case TYPE_DIV:
33829 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
33830 priority, priority);
33831 if (priority >= 0 && priority < 0x01000000)
33832 priority >>= 3;
33833 break;
33836 #endif
33838 if (insn_must_be_first_in_group (insn)
33839 && reload_completed
33840 && current_sched_info->sched_max_insns_priority
33841 && rs6000_sched_restricted_insns_priority)
33844 /* Prioritize insns that can be dispatched only in the first
33845 dispatch slot. */
33846 if (rs6000_sched_restricted_insns_priority == 1)
33847 /* Attach highest priority to insn. This means that in
33848 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
33849 precede 'priority' (critical path) considerations. */
33850 return current_sched_info->sched_max_insns_priority;
33851 else if (rs6000_sched_restricted_insns_priority == 2)
33852 /* Increase priority of insn by a minimal amount. This means that in
33853 haifa-sched.c:ready_sort(), only 'priority' (critical path)
33854 considerations precede dispatch-slot restriction considerations. */
33855 return (priority + 1);
33858 if (rs6000_cpu == PROCESSOR_POWER6
33859 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
33860 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
33861 /* Attach highest priority to insn if the scheduler has just issued two
33862 stores and this instruction is a load, or two loads and this instruction
33863 is a store. Power6 wants loads and stores scheduled alternately
33864 when possible. */
33865 return current_sched_info->sched_max_insns_priority;
33867 return priority;
33870 /* Return true if the instruction is nonpipelined on the Cell. */
33871 static bool
33872 is_nonpipeline_insn (rtx_insn *insn)
33874 enum attr_type type;
33875 if (!insn || !NONDEBUG_INSN_P (insn)
33876 || GET_CODE (PATTERN (insn)) == USE
33877 || GET_CODE (PATTERN (insn)) == CLOBBER)
33878 return false;
33880 type = get_attr_type (insn);
33881 if (type == TYPE_MUL
33882 || type == TYPE_DIV
33883 || type == TYPE_SDIV
33884 || type == TYPE_DDIV
33885 || type == TYPE_SSQRT
33886 || type == TYPE_DSQRT
33887 || type == TYPE_MFCR
33888 || type == TYPE_MFCRF
33889 || type == TYPE_MFJMPR)
33891 return true;
33893 return false;
33897 /* Return how many instructions the machine can issue per cycle. */
33899 static int
33900 rs6000_issue_rate (void)
33902 /* Unless scheduling for register pressure, use issue rate of 1 for
33903 first scheduling pass to decrease degradation. */
33904 if (!reload_completed && !flag_sched_pressure)
33905 return 1;
33907 switch (rs6000_cpu_attr) {
33908 case CPU_RS64A:
33909 case CPU_PPC601: /* ? */
33910 case CPU_PPC7450:
33911 return 3;
33912 case CPU_PPC440:
33913 case CPU_PPC603:
33914 case CPU_PPC750:
33915 case CPU_PPC7400:
33916 case CPU_PPC8540:
33917 case CPU_PPC8548:
33918 case CPU_CELL:
33919 case CPU_PPCE300C2:
33920 case CPU_PPCE300C3:
33921 case CPU_PPCE500MC:
33922 case CPU_PPCE500MC64:
33923 case CPU_PPCE5500:
33924 case CPU_PPCE6500:
33925 case CPU_TITAN:
33926 return 2;
33927 case CPU_PPC476:
33928 case CPU_PPC604:
33929 case CPU_PPC604E:
33930 case CPU_PPC620:
33931 case CPU_PPC630:
33932 return 4;
33933 case CPU_POWER4:
33934 case CPU_POWER5:
33935 case CPU_POWER6:
33936 case CPU_POWER7:
33937 return 5;
33938 case CPU_POWER8:
33939 return 7;
33940 case CPU_POWER9:
33941 return 6;
33942 default:
33943 return 1;
33947 /* Return how many instructions to look ahead for better insn
33948 scheduling. */
33950 static int
33951 rs6000_use_sched_lookahead (void)
33953 switch (rs6000_cpu_attr)
33955 case CPU_PPC8540:
33956 case CPU_PPC8548:
33957 return 4;
33959 case CPU_CELL:
33960 return (reload_completed ? 8 : 0);
33962 default:
33963 return 0;
33967 /* We are choosing insn from the ready queue. Return zero if INSN can be
33968 chosen. */
33969 static int
33970 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
33972 if (ready_index == 0)
33973 return 0;
33975 if (rs6000_cpu_attr != CPU_CELL)
33976 return 0;
33978 gcc_assert (insn != NULL_RTX && INSN_P (insn));
33980 if (!reload_completed
33981 || is_nonpipeline_insn (insn)
33982 || is_microcoded_insn (insn))
33983 return 1;
33985 return 0;
33988 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
33989 and return true. */
33991 static bool
33992 find_mem_ref (rtx pat, rtx *mem_ref)
33994 const char * fmt;
33995 int i, j;
33997 /* stack_tie does not produce any real memory traffic. */
33998 if (tie_operand (pat, VOIDmode))
33999 return false;
34001 if (GET_CODE (pat) == MEM)
34003 *mem_ref = pat;
34004 return true;
34007 /* Recursively process the pattern. */
34008 fmt = GET_RTX_FORMAT (GET_CODE (pat));
34010 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
34012 if (fmt[i] == 'e')
34014 if (find_mem_ref (XEXP (pat, i), mem_ref))
34015 return true;
34017 else if (fmt[i] == 'E')
34018 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
34020 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
34021 return true;
34025 return false;
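/* Illustrative use (a sketch; note_load is hypothetical).  MEM ends up
   pointing at the first memory reference found in the expression:

     rtx mem;
     if (find_mem_ref (SET_SRC (pat), &mem))
       note_load (mem);

   This mirrors how is_load_insn1 and is_store_insn1 below apply it to
   the SET_SRC and SET_DEST of a pattern.  */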
34028 /* Determine if PAT is a PATTERN of a load insn. */
34030 static bool
34031 is_load_insn1 (rtx pat, rtx *load_mem)
34033 if (!pat)
34034 return false;
34036 if (GET_CODE (pat) == SET)
34037 return find_mem_ref (SET_SRC (pat), load_mem);
34039 if (GET_CODE (pat) == PARALLEL)
34041 int i;
34043 for (i = 0; i < XVECLEN (pat, 0); i++)
34044 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
34045 return true;
34048 return false;
34051 /* Determine if INSN loads from memory. */
34053 static bool
34054 is_load_insn (rtx insn, rtx *load_mem)
34056 if (!insn || !INSN_P (insn))
34057 return false;
34059 if (CALL_P (insn))
34060 return false;
34062 return is_load_insn1 (PATTERN (insn), load_mem);
34065 /* Determine if PAT is a PATTERN of a store insn. */
34067 static bool
34068 is_store_insn1 (rtx pat, rtx *str_mem)
34070 if (!pat)
34071 return false;
34073 if (GET_CODE (pat) == SET)
34074 return find_mem_ref (SET_DEST (pat), str_mem);
34076 if (GET_CODE (pat) == PARALLEL)
34078 int i;
34080 for (i = 0; i < XVECLEN (pat, 0); i++)
34081 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
34082 return true;
34085 return false;
34088 /* Determine if INSN stores to memory. */
34090 static bool
34091 is_store_insn (rtx insn, rtx *str_mem)
34093 if (!insn || !INSN_P (insn))
34094 return false;
34096 return is_store_insn1 (PATTERN (insn), str_mem);
34099 /* Return whether TYPE is a Power9 pairable vector instruction type. */
34101 static bool
34102 is_power9_pairable_vec_type (enum attr_type type)
34104 switch (type)
34106 case TYPE_VECSIMPLE:
34107 case TYPE_VECCOMPLEX:
34108 case TYPE_VECDIV:
34109 case TYPE_VECCMP:
34110 case TYPE_VECPERM:
34111 case TYPE_VECFLOAT:
34112 case TYPE_VECFDIV:
34113 case TYPE_VECDOUBLE:
34114 return true;
34115 default:
34116 break;
34118 return false;
34121 /* Returns whether the dependence between INSN and NEXT is considered
34122 costly by the given target. */
34124 static bool
34125 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
34127 rtx insn;
34128 rtx next;
34129 rtx load_mem, str_mem;
34131 /* If the flag is not enabled - no dependence is considered costly;
34132 allow all dependent insns in the same group.
34133 This is the most aggressive option. */
34134 if (rs6000_sched_costly_dep == no_dep_costly)
34135 return false;
34137 /* If the flag is set to 1 - a dependence is always considered costly;
34138 do not allow dependent instructions in the same group.
34139 This is the most conservative option. */
34140 if (rs6000_sched_costly_dep == all_deps_costly)
34141 return true;
34143 insn = DEP_PRO (dep);
34144 next = DEP_CON (dep);
34146 if (rs6000_sched_costly_dep == store_to_load_dep_costly
34147 && is_load_insn (next, &load_mem)
34148 && is_store_insn (insn, &str_mem))
34149 /* Prevent load after store in the same group. */
34150 return true;
34152 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
34153 && is_load_insn (next, &load_mem)
34154 && is_store_insn (insn, &str_mem)
34155 && DEP_TYPE (dep) == REG_DEP_TRUE
34156 && mem_locations_overlap (str_mem, load_mem))
34157 /* Prevent load after store in the same group if it is a true
34158 dependence. */
34159 return true;
34161 /* The flag is set to X; dependences with latency >= X are considered costly,
34162 and will not be scheduled in the same group. */
34163 if (rs6000_sched_costly_dep <= max_dep_latency
34164 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
34165 return true;
34167 return false;
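/* For example, when the option carries a numeric threshold (the
   "latency >= X" case above), a dependence of cost 5 between insns
   scheduled 1 cycle apart gives cost - distance == 4, so a threshold
   of 3 marks it costly and keeps the two insns in separate groups.  */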
34170 /* Return the next insn after INSN that is found before TAIL is reached,
34171 skipping any "non-active" insns - insns that will not actually occupy
34172 an issue slot. Return NULL_RTX if such an insn is not found. */
34174 static rtx_insn *
34175 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
34177 if (insn == NULL_RTX || insn == tail)
34178 return NULL;
34180 while (1)
34182 insn = NEXT_INSN (insn);
34183 if (insn == NULL_RTX || insn == tail)
34184 return NULL;
34186 if (CALL_P (insn)
34187 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
34188 || (NONJUMP_INSN_P (insn)
34189 && GET_CODE (PATTERN (insn)) != USE
34190 && GET_CODE (PATTERN (insn)) != CLOBBER
34191 && INSN_CODE (insn) != CODE_FOR_stack_tie))
34192 break;
34194 return insn;
34197 /* Do Power9 specific sched_reorder2 reordering of ready list. */
34199 static int
34200 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
34202 int pos;
34203 int i;
34204 rtx_insn *tmp;
34205 enum attr_type type, type2;
34207 type = get_attr_type (last_scheduled_insn);
34209 /* Try to issue fixed point divides back-to-back in pairs so they will be
34210 routed to separate execution units and execute in parallel. */
34211 if (type == TYPE_DIV && divide_cnt == 0)
34213 /* First divide has been scheduled. */
34214 divide_cnt = 1;
34216 /* Scan the ready list looking for another divide, if found move it
34217 to the end of the list so it is chosen next. */
34218 pos = lastpos;
34219 while (pos >= 0)
34221 if (recog_memoized (ready[pos]) >= 0
34222 && get_attr_type (ready[pos]) == TYPE_DIV)
34224 tmp = ready[pos];
34225 for (i = pos; i < lastpos; i++)
34226 ready[i] = ready[i + 1];
34227 ready[lastpos] = tmp;
34228 break;
34230 pos--;
34233 else
34235 /* Last insn was the 2nd divide or not a divide, reset the counter. */
34236 divide_cnt = 0;
34238 /* The best dispatch throughput for vector and vector load insns can be
34239 achieved by interleaving a vector and vector load such that they'll
34240 dispatch to the same superslice. If this pairing cannot be achieved
34241 then it is best to pair vector insns together and vector load insns
34242 together.
34244 To aid in this pairing, vec_pairing maintains the current state with
34245 the following values:
34247 0 : Initial state, no vecload/vector pairing has been started.
34249 1 : A vecload or vector insn has been issued and a candidate for
34250 pairing has been found and moved to the end of the ready
34251 list. */
34252 if (type == TYPE_VECLOAD)
34254 /* Issued a vecload. */
34255 if (vec_pairing == 0)
34257 int vecload_pos = -1;
34258 /* We issued a single vecload, look for a vector insn to pair it
34259 with. If one isn't found, try to pair another vecload. */
34260 pos = lastpos;
34261 while (pos >= 0)
34263 if (recog_memoized (ready[pos]) >= 0)
34265 type2 = get_attr_type (ready[pos]);
34266 if (is_power9_pairable_vec_type (type2))
34268 /* Found a vector insn to pair with, move it to the
34269 end of the ready list so it is scheduled next. */
34270 tmp = ready[pos];
34271 for (i = pos; i < lastpos; i++)
34272 ready[i] = ready[i + 1];
34273 ready[lastpos] = tmp;
34274 vec_pairing = 1;
34275 return cached_can_issue_more;
34277 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
34278 /* Remember position of first vecload seen. */
34279 vecload_pos = pos;
34281 pos--;
34283 if (vecload_pos >= 0)
34285 /* Didn't find a vector to pair with but did find a vecload,
34286 move it to the end of the ready list. */
34287 tmp = ready[vecload_pos];
34288 for (i = vecload_pos; i < lastpos; i++)
34289 ready[i] = ready[i + 1];
34290 ready[lastpos] = tmp;
34291 vec_pairing = 1;
34292 return cached_can_issue_more;
34296 else if (is_power9_pairable_vec_type (type))
34298 /* Issued a vector operation. */
34299 if (vec_pairing == 0)
34301 int vec_pos = -1;
34302 /* We issued a single vector insn, look for a vecload to pair it
34303 with. If one isn't found, try to pair another vector. */
34304 pos = lastpos;
34305 while (pos >= 0)
34307 if (recog_memoized (ready[pos]) >= 0)
34309 type2 = get_attr_type (ready[pos]);
34310 if (type2 == TYPE_VECLOAD)
34312 /* Found a vecload insn to pair with, move it to the
34313 end of the ready list so it is scheduled next. */
34314 tmp = ready[pos];
34315 for (i = pos; i < lastpos; i++)
34316 ready[i] = ready[i + 1];
34317 ready[lastpos] = tmp;
34318 vec_pairing = 1;
34319 return cached_can_issue_more;
34321 else if (is_power9_pairable_vec_type (type2)
34322 && vec_pos == -1)
34323 /* Remember position of first vector insn seen. */
34324 vec_pos = pos;
34326 pos--;
34328 if (vec_pos >= 0)
34330 /* Didn't find a vecload to pair with but did find a vector
34331 insn, move it to the end of the ready list. */
34332 tmp = ready[vec_pos];
34333 for (i = vec_pos; i < lastpos; i++)
34334 ready[i] = ready[i + 1];
34335 ready[lastpos] = tmp;
34336 vec_pairing = 1;
34337 return cached_can_issue_more;
34342 /* We've either finished a vec/vecload pair, couldn't find an insn to
34343 continue the current pair, or the last insn had nothing to do with
34344 pairing. In any case, reset the state. */
34345 vec_pairing = 0;
34348 return cached_can_issue_more;
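/* The move-to-end idiom repeated above (and in rs6000_sched_reorder2
   below) is a left rotation of the tail of the ready array; as a
   standalone sketch (hypothetical helper, not part of this file):

     static void
     move_to_end (rtx_insn **ready, int pos, int lastpos)
     {
       rtx_insn *tmp = ready[pos];
       int i;
       for (i = pos; i < lastpos; i++)
         ready[i] = ready[i + 1];
       ready[lastpos] = tmp;
     }

   The scheduler consumes the ready list from the end, so placing an
   insn at LASTPOS makes it the next candidate to issue.  */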
34351 /* We are about to begin issuing insns for this clock cycle. */
34353 static int
34354 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
34355 rtx_insn **ready ATTRIBUTE_UNUSED,
34356 int *pn_ready ATTRIBUTE_UNUSED,
34357 int clock_var ATTRIBUTE_UNUSED)
34359 int n_ready = *pn_ready;
34361 if (sched_verbose)
34362 fprintf (dump, "// rs6000_sched_reorder :\n");
34364 /* Reorder the ready list if the second-to-last ready insn
34365 is a nonpipelined insn. */
34366 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
34368 if (is_nonpipeline_insn (ready[n_ready - 1])
34369 && (recog_memoized (ready[n_ready - 2]) > 0))
34370 /* Simply swap first two insns. */
34371 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
34374 if (rs6000_cpu == PROCESSOR_POWER6)
34375 load_store_pendulum = 0;
34377 return rs6000_issue_rate ();
34380 /* Like rs6000_sched_reorder, but called after issuing each insn. */
34382 static int
34383 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
34384 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
34386 if (sched_verbose)
34387 fprintf (dump, "// rs6000_sched_reorder2 :\n");
34389 /* For Power6, we need to handle some special cases to try to keep the
34390 store queue from overflowing and triggering expensive flushes.
34392 This code monitors how load and store instructions are being issued
34393 and skews the ready list one way or the other to increase the likelihood
34394 that a desired instruction is issued at the proper time.
34396 A couple of things are done. First, we maintain a "load_store_pendulum"
34397 to track the current state of load/store issue.
34399 - If the pendulum is at zero, then no loads or stores have been
34400 issued in the current cycle so we do nothing.
34402 - If the pendulum is 1, then a single load has been issued in this
34403 cycle and we attempt to locate another load in the ready list to
34404 issue with it.
34406 - If the pendulum is -2, then two stores have already been
34407 issued in this cycle, so we increase the priority of the first load
34408 in the ready list to increase its likelihood of being chosen first
34409 in the next cycle.
34411 - If the pendulum is -1, then a single store has been issued in this
34412 cycle and we attempt to locate another store in the ready list to
34413 issue with it, preferring a store to an adjacent memory location to
34414 facilitate store pairing in the store queue.
34416 - If the pendulum is 2, then two loads have already been
34417 issued in this cycle, so we increase the priority of the first store
34418 in the ready list to increase its likelihood of being chosen first
34419 in the next cycle.
34421 - If the pendulum < -2 or > 2, then do nothing.
34423 Note: This code covers the most common scenarios. There exist non-
34424 load/store instructions which make use of the LSU and which
34425 would need to be accounted for to strictly model the behavior
34426 of the machine. Those instructions are currently unaccounted
34427 for, to help minimize the compile-time overhead of this code. */
34429 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
34431 int pos;
34432 int i;
34433 rtx_insn *tmp;
34434 rtx load_mem, str_mem;
34436 if (is_store_insn (last_scheduled_insn, &str_mem))
34437 /* Issuing a store, swing the load_store_pendulum to the left */
34438 load_store_pendulum--;
34439 else if (is_load_insn (last_scheduled_insn, &load_mem))
34440 /* Issuing a load, swing the load_store_pendulum to the right */
34441 load_store_pendulum++;
34442 else
34443 return cached_can_issue_more;
34445 /* If the pendulum is balanced, or there is only one instruction on
34446 the ready list, then all is well, so return. */
34447 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
34448 return cached_can_issue_more;
34450 if (load_store_pendulum == 1)
34452 /* A load has been issued in this cycle. Scan the ready list
34453 for another load to issue with it */
34454 pos = *pn_ready-1;
34456 while (pos >= 0)
34458 if (is_load_insn (ready[pos], &load_mem))
34460 /* Found a load. Move it to the head of the ready list,
34461 and adjust its priority so that it is more likely to
34462 stay there */
34463 tmp = ready[pos];
34464 for (i=pos; i<*pn_ready-1; i++)
34465 ready[i] = ready[i + 1];
34466 ready[*pn_ready-1] = tmp;
34468 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34469 INSN_PRIORITY (tmp)++;
34470 break;
34472 pos--;
34475 else if (load_store_pendulum == -2)
34477 /* Two stores have been issued in this cycle. Increase the
34478 priority of the first load in the ready list to favor it for
34479 issuing in the next cycle. */
34480 pos = *pn_ready-1;
34482 while (pos >= 0)
34484 if (is_load_insn (ready[pos], &load_mem)
34485 && !sel_sched_p ()
34486 && INSN_PRIORITY_KNOWN (ready[pos]))
34488 INSN_PRIORITY (ready[pos])++;
34490 /* Adjust the pendulum to account for the fact that a load
34491 was found and increased in priority. This is to prevent
34492 increasing the priority of multiple loads */
34493 load_store_pendulum--;
34495 break;
34497 pos--;
34500 else if (load_store_pendulum == -1)
34502 /* A store has been issued in this cycle. Scan the ready list for
34503 another store to issue with it, preferring a store to an adjacent
34504 memory location */
34505 int first_store_pos = -1;
34507 pos = *pn_ready-1;
34509 while (pos >= 0)
34511 if (is_store_insn (ready[pos], &str_mem))
34513 rtx str_mem2;
34514 /* Maintain the index of the first store found on the
34515 list */
34516 if (first_store_pos == -1)
34517 first_store_pos = pos;
34519 if (is_store_insn (last_scheduled_insn, &str_mem2)
34520 && adjacent_mem_locations (str_mem, str_mem2))
34522 /* Found an adjacent store. Move it to the head of the
34523 ready list, and adjust its priority so that it is
34524 more likely to stay there */
34525 tmp = ready[pos];
34526 for (i=pos; i<*pn_ready-1; i++)
34527 ready[i] = ready[i + 1];
34528 ready[*pn_ready-1] = tmp;
34530 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34531 INSN_PRIORITY (tmp)++;
34533 first_store_pos = -1;
34535 break;
34538 pos--;
34541 if (first_store_pos >= 0)
34543 /* An adjacent store wasn't found, but a non-adjacent store was,
34544 so move the non-adjacent store to the front of the ready
34545 list, and adjust its priority so that it is more likely to
34546 stay there. */
34547 tmp = ready[first_store_pos];
34548 for (i=first_store_pos; i<*pn_ready-1; i++)
34549 ready[i] = ready[i + 1];
34550 ready[*pn_ready-1] = tmp;
34551 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
34552 INSN_PRIORITY (tmp)++;
34555 else if (load_store_pendulum == 2)
34557 /* Two loads have been issued in this cycle. Increase the priority
34558 of the first store in the ready list to favor it for issuing in
34559 the next cycle. */
34560 pos = *pn_ready-1;
34562 while (pos >= 0)
34564 if (is_store_insn (ready[pos], &str_mem)
34565 && !sel_sched_p ()
34566 && INSN_PRIORITY_KNOWN (ready[pos]))
34568 INSN_PRIORITY (ready[pos])++;
34570 /* Adjust the pendulum to account for the fact that a store
34571 was found and increased in priority. This is to prevent
34572 increasing the priority of multiple stores */
34573 load_store_pendulum++;
34575 break;
34577 pos--;
34582 /* Do Power9 dependent reordering if necessary. */
34583 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
34584 && recog_memoized (last_scheduled_insn) >= 0)
34585 return power9_sched_reorder2 (ready, *pn_ready - 1);
34587 return cached_can_issue_more;
34590 /* Return whether the presence of INSN causes a dispatch group termination
34591 of group WHICH_GROUP.
34593 If WHICH_GROUP == current_group, this function will return true if INSN
34594 causes the termination of the current group (i.e., the dispatch group to
34595 which INSN belongs). This means that INSN will be the last insn in the
34596 group it belongs to.
34598 If WHICH_GROUP == previous_group, this function will return true if INSN
34599 causes the termination of the previous group (i.e., the dispatch group that
34600 precedes the group to which INSN belongs). This means that INSN will be
34601 the first insn in the group it belongs to. */
34603 static bool
34604 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
34606 bool first, last;
34608 if (! insn)
34609 return false;
34611 first = insn_must_be_first_in_group (insn);
34612 last = insn_must_be_last_in_group (insn);
34614 if (first && last)
34615 return true;
34617 if (which_group == current_group)
34618 return last;
34619 else if (which_group == previous_group)
34620 return first;
34622 return false;
34626 static bool
34627 insn_must_be_first_in_group (rtx_insn *insn)
34629 enum attr_type type;
34631 if (!insn
34632 || NOTE_P (insn)
34633 || DEBUG_INSN_P (insn)
34634 || GET_CODE (PATTERN (insn)) == USE
34635 || GET_CODE (PATTERN (insn)) == CLOBBER)
34636 return false;
34638 switch (rs6000_cpu)
34640 case PROCESSOR_POWER5:
34641 if (is_cracked_insn (insn))
34642 return true;
34643 /* FALLTHRU */
34644 case PROCESSOR_POWER4:
34645 if (is_microcoded_insn (insn))
34646 return true;
34648 if (!rs6000_sched_groups)
34649 return false;
34651 type = get_attr_type (insn);
34653 switch (type)
34655 case TYPE_MFCR:
34656 case TYPE_MFCRF:
34657 case TYPE_MTCR:
34658 case TYPE_DELAYED_CR:
34659 case TYPE_CR_LOGICAL:
34660 case TYPE_MTJMPR:
34661 case TYPE_MFJMPR:
34662 case TYPE_DIV:
34663 case TYPE_LOAD_L:
34664 case TYPE_STORE_C:
34665 case TYPE_ISYNC:
34666 case TYPE_SYNC:
34667 return true;
34668 default:
34669 break;
34671 break;
34672 case PROCESSOR_POWER6:
34673 type = get_attr_type (insn);
34675 switch (type)
34677 case TYPE_EXTS:
34678 case TYPE_CNTLZ:
34679 case TYPE_TRAP:
34680 case TYPE_MUL:
34681 case TYPE_INSERT:
34682 case TYPE_FPCOMPARE:
34683 case TYPE_MFCR:
34684 case TYPE_MTCR:
34685 case TYPE_MFJMPR:
34686 case TYPE_MTJMPR:
34687 case TYPE_ISYNC:
34688 case TYPE_SYNC:
34689 case TYPE_LOAD_L:
34690 case TYPE_STORE_C:
34691 return true;
34692 case TYPE_SHIFT:
34693 if (get_attr_dot (insn) == DOT_NO
34694 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
34695 return true;
34696 else
34697 break;
34698 case TYPE_DIV:
34699 if (get_attr_size (insn) == SIZE_32)
34700 return true;
34701 else
34702 break;
34703 case TYPE_LOAD:
34704 case TYPE_STORE:
34705 case TYPE_FPLOAD:
34706 case TYPE_FPSTORE:
34707 if (get_attr_update (insn) == UPDATE_YES)
34708 return true;
34709 else
34710 break;
34711 default:
34712 break;
34714 break;
34715 case PROCESSOR_POWER7:
34716 type = get_attr_type (insn);
34718 switch (type)
34720 case TYPE_CR_LOGICAL:
34721 case TYPE_MFCR:
34722 case TYPE_MFCRF:
34723 case TYPE_MTCR:
34724 case TYPE_DIV:
34725 case TYPE_ISYNC:
34726 case TYPE_LOAD_L:
34727 case TYPE_STORE_C:
34728 case TYPE_MFJMPR:
34729 case TYPE_MTJMPR:
34730 return true;
34731 case TYPE_MUL:
34732 case TYPE_SHIFT:
34733 case TYPE_EXTS:
34734 if (get_attr_dot (insn) == DOT_YES)
34735 return true;
34736 else
34737 break;
34738 case TYPE_LOAD:
34739 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34740 || get_attr_update (insn) == UPDATE_YES)
34741 return true;
34742 else
34743 break;
34744 case TYPE_STORE:
34745 case TYPE_FPLOAD:
34746 case TYPE_FPSTORE:
34747 if (get_attr_update (insn) == UPDATE_YES)
34748 return true;
34749 else
34750 break;
34751 default:
34752 break;
34754 break;
34755 case PROCESSOR_POWER8:
34756 type = get_attr_type (insn);
34758 switch (type)
34760 case TYPE_CR_LOGICAL:
34761 case TYPE_DELAYED_CR:
34762 case TYPE_MFCR:
34763 case TYPE_MFCRF:
34764 case TYPE_MTCR:
34765 case TYPE_SYNC:
34766 case TYPE_ISYNC:
34767 case TYPE_LOAD_L:
34768 case TYPE_STORE_C:
34769 case TYPE_VECSTORE:
34770 case TYPE_MFJMPR:
34771 case TYPE_MTJMPR:
34772 return true;
34773 case TYPE_SHIFT:
34774 case TYPE_EXTS:
34775 case TYPE_MUL:
34776 if (get_attr_dot (insn) == DOT_YES)
34777 return true;
34778 else
34779 break;
34780 case TYPE_LOAD:
34781 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34782 || get_attr_update (insn) == UPDATE_YES)
34783 return true;
34784 else
34785 break;
34786 case TYPE_STORE:
34787 if (get_attr_update (insn) == UPDATE_YES
34788 && get_attr_indexed (insn) == INDEXED_YES)
34789 return true;
34790 else
34791 break;
34792 default:
34793 break;
34795 break;
34796 default:
34797 break;
34800 return false;
34803 static bool
34804 insn_must_be_last_in_group (rtx_insn *insn)
34806 enum attr_type type;
34808 if (!insn
34809 || NOTE_P (insn)
34810 || DEBUG_INSN_P (insn)
34811 || GET_CODE (PATTERN (insn)) == USE
34812 || GET_CODE (PATTERN (insn)) == CLOBBER)
34813 return false;
34815 switch (rs6000_cpu) {
34816 case PROCESSOR_POWER4:
34817 case PROCESSOR_POWER5:
34818 if (is_microcoded_insn (insn))
34819 return true;
34821 if (is_branch_slot_insn (insn))
34822 return true;
34824 break;
34825 case PROCESSOR_POWER6:
34826 type = get_attr_type (insn);
34828 switch (type)
34830 case TYPE_EXTS:
34831 case TYPE_CNTLZ:
34832 case TYPE_TRAP:
34833 case TYPE_MUL:
34834 case TYPE_FPCOMPARE:
34835 case TYPE_MFCR:
34836 case TYPE_MTCR:
34837 case TYPE_MFJMPR:
34838 case TYPE_MTJMPR:
34839 case TYPE_ISYNC:
34840 case TYPE_SYNC:
34841 case TYPE_LOAD_L:
34842 case TYPE_STORE_C:
34843 return true;
34844 case TYPE_SHIFT:
34845 if (get_attr_dot (insn) == DOT_NO
34846 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
34847 return true;
34848 else
34849 break;
34850 case TYPE_DIV:
34851 if (get_attr_size (insn) == SIZE_32)
34852 return true;
34853 else
34854 break;
34855 default:
34856 break;
34858 break;
34859 case PROCESSOR_POWER7:
34860 type = get_attr_type (insn);
34862 switch (type)
34864 case TYPE_ISYNC:
34865 case TYPE_SYNC:
34866 case TYPE_LOAD_L:
34867 case TYPE_STORE_C:
34868 return true;
34869 case TYPE_LOAD:
34870 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34871 && get_attr_update (insn) == UPDATE_YES)
34872 return true;
34873 else
34874 break;
34875 case TYPE_STORE:
34876 if (get_attr_update (insn) == UPDATE_YES
34877 && get_attr_indexed (insn) == INDEXED_YES)
34878 return true;
34879 else
34880 break;
34881 default:
34882 break;
34884 break;
34885 case PROCESSOR_POWER8:
34886 type = get_attr_type (insn);
34888 switch (type)
34890 case TYPE_MFCR:
34891 case TYPE_MTCR:
34892 case TYPE_ISYNC:
34893 case TYPE_SYNC:
34894 case TYPE_LOAD_L:
34895 case TYPE_STORE_C:
34896 return true;
34897 case TYPE_LOAD:
34898 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34899 && get_attr_update (insn) == UPDATE_YES)
34900 return true;
34901 else
34902 break;
34903 case TYPE_STORE:
34904 if (get_attr_update (insn) == UPDATE_YES
34905 && get_attr_indexed (insn) == INDEXED_YES)
34906 return true;
34907 else
34908 break;
34909 default:
34910 break;
34912 break;
34913 default:
34914 break;
34917 return false;
34920 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
34921 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
34923 static bool
34924 is_costly_group (rtx *group_insns, rtx next_insn)
34926 int i;
34927 int issue_rate = rs6000_issue_rate ();
34929 for (i = 0; i < issue_rate; i++)
34931 sd_iterator_def sd_it;
34932 dep_t dep;
34933 rtx insn = group_insns[i];
34935 if (!insn)
34936 continue;
34938 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
34940 rtx next = DEP_CON (dep);
34942 if (next == next_insn
34943 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
34944 return true;
34948 return false;
34951 /* Utility of the function redefine_groups.
34952 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
34953 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
34954 to keep it "far" (in a separate group) from GROUP_INSNS, following
34955 one of the following schemes, depending on the value of the flag
34956 -minsert-sched-nops = X:
34957 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
34958 in order to force NEXT_INSN into a separate group.
34959 (2) X < sched_finish_regroup_exact: insert exactly X nops.
34960 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
34961 insertion (has a group just ended, how many vacant issue slots remain in the
34962 last group, and how many dispatch groups were encountered so far). */
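/* Example (a sketch, assuming issue_rate == 4 and a costly dependence
   on NEXT_INSN): under -minsert-sched-nops=2 exactly two nops are
   emitted before NEXT_INSN, whereas under
   -minsert-sched-nops=regroup_exact enough nops (at most
   can_issue_more) are emitted that NEXT_INSN is forced into the next
   dispatch group. */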
34964 static int
34965 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
34966 rtx_insn *next_insn, bool *group_end, int can_issue_more,
34967 int *group_count)
34969 rtx nop;
34970 bool force;
34971 int issue_rate = rs6000_issue_rate ();
34972 bool end = *group_end;
34973 int i;
34975 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
34976 return can_issue_more;
34978 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
34979 return can_issue_more;
34981 force = is_costly_group (group_insns, next_insn);
34982 if (!force)
34983 return can_issue_more;
34985 if (sched_verbose > 6)
34986 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
34987 *group_count, can_issue_more);
34989 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
34991 if (*group_end)
34992 can_issue_more = 0;
34994 /* Since only a branch can be issued in the last issue_slot, it is
34995 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
34996 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
34997 in this case the last nop will start a new group and the branch
34998 will be forced to the new group. */
34999 if (can_issue_more && !is_branch_slot_insn (next_insn))
35000 can_issue_more--;
35002 /* Do we have a special group ending nop? */
35003 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
35004 || rs6000_cpu_attr == CPU_POWER8)
35006 nop = gen_group_ending_nop ();
35007 emit_insn_before (nop, next_insn);
35008 can_issue_more = 0;
35010 else
35011 while (can_issue_more > 0)
35013 nop = gen_nop ();
35014 emit_insn_before (nop, next_insn);
35015 can_issue_more--;
35018 *group_end = true;
35019 return 0;
35022 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
35024 int n_nops = rs6000_sched_insert_nops;
35026 /* Nops can't be issued from the branch slot, so the effective
35027 issue_rate for nops is 'issue_rate - 1'. */
35028 if (can_issue_more == 0)
35029 can_issue_more = issue_rate;
35030 can_issue_more--;
35031 if (can_issue_more == 0)
35033 can_issue_more = issue_rate - 1;
35034 (*group_count)++;
35035 end = true;
35036 for (i = 0; i < issue_rate; i++)
35038 group_insns[i] = 0;
35042 while (n_nops > 0)
35044 nop = gen_nop ();
35045 emit_insn_before (nop, next_insn);
35046 if (can_issue_more == issue_rate - 1) /* new group begins */
35047 end = false;
35048 can_issue_more--;
35049 if (can_issue_more == 0)
35051 can_issue_more = issue_rate - 1;
35052 (*group_count)++;
35053 end = true;
35054 for (i = 0; i < issue_rate; i++)
35056 group_insns[i] = 0;
35059 n_nops--;
35062 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
35063 can_issue_more++;
35065 /* Is next_insn going to start a new group? */
35066 *group_end
35067 = (end
35068 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
35069 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
35070 || (can_issue_more < issue_rate &&
35071 insn_terminates_group_p (next_insn, previous_group)));
35072 if (*group_end && end)
35073 (*group_count)--;
35075 if (sched_verbose > 6)
35076 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
35077 *group_count, can_issue_more);
35078 return can_issue_more;
35081 return can_issue_more;
35084 /* This function tries to synch the dispatch groups that the compiler "sees"
35085 with the dispatch groups that the processor dispatcher is expected to
35086 form in practice. It tries to achieve this synchronization by forcing the
35087 estimated processor grouping on the compiler (as opposed to the function
35088 'pad_groups', which tries to force the scheduler's grouping on the processor).
35090 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
35091 examines the (estimated) dispatch groups that will be formed by the processor
35092 dispatcher. It marks these group boundaries to reflect the estimated
35093 processor grouping, overriding the grouping that the scheduler had marked.
35094 Depending on the value of the flag '-minsert-sched-nops' this function can
35095 force certain insns into separate groups or force a certain distance between
35096 them by inserting nops, for example, if there exists a "costly dependence"
35097 between the insns.
35099 The function estimates the group boundaries that the processor will form as
35100 follows: It keeps track of how many vacant issue slots are available after
35101 each insn. A subsequent insn will start a new group if one of the following
35102 4 cases applies:
35103 - no more vacant issue slots remain in the current dispatch group.
35104 - only the last issue slot, which is the branch slot, is vacant, but the next
35105 insn is not a branch.
35106 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
35107 which means that a cracked insn (which occupies two issue slots) can't be
35108 issued in this group.
35109 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
35110 start a new group. */
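/* Sketch of the third case above (illustrative numbers): with
   issue_rate == 5, if only the branch slot plus one other slot remain
   vacant, a cracked insn, which needs two ordinary issue slots, cannot
   fit and is treated as starting a new group. */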
35112 static int
35113 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
35114 rtx_insn *tail)
35116 rtx_insn *insn, *next_insn;
35117 int issue_rate;
35118 int can_issue_more;
35119 int slot, i;
35120 bool group_end;
35121 int group_count = 0;
35122 rtx *group_insns;
35124 /* Initialize. */
35125 issue_rate = rs6000_issue_rate ();
35126 group_insns = XALLOCAVEC (rtx, issue_rate);
35127 for (i = 0; i < issue_rate; i++)
35129 group_insns[i] = 0;
35131 can_issue_more = issue_rate;
35132 slot = 0;
35133 insn = get_next_active_insn (prev_head_insn, tail);
35134 group_end = false;
35136 while (insn != NULL_RTX)
35138 slot = (issue_rate - can_issue_more);
35139 group_insns[slot] = insn;
35140 can_issue_more =
35141 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
35142 if (insn_terminates_group_p (insn, current_group))
35143 can_issue_more = 0;
35145 next_insn = get_next_active_insn (insn, tail);
35146 if (next_insn == NULL_RTX)
35147 return group_count + 1;
35149 /* Is next_insn going to start a new group? */
35150 group_end
35151 = (can_issue_more == 0
35152 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
35153 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
35154 || (can_issue_more < issue_rate &&
35155 insn_terminates_group_p (next_insn, previous_group)));
35157 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
35158 next_insn, &group_end, can_issue_more,
35159 &group_count);
35161 if (group_end)
35163 group_count++;
35164 can_issue_more = 0;
35165 for (i = 0; i < issue_rate; i++)
35167 group_insns[i] = 0;
35171 if (GET_MODE (next_insn) == TImode && can_issue_more)
35172 PUT_MODE (next_insn, VOIDmode);
35173 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
35174 PUT_MODE (next_insn, TImode);
35176 insn = next_insn;
35177 if (can_issue_more == 0)
35178 can_issue_more = issue_rate;
35179 } /* while */
35181 return group_count;
35184 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
35185 dispatch group boundaries that the scheduler had marked. Pad with nops
35186 any dispatch groups which have vacant issue slots, in order to force the
35187 scheduler's grouping on the processor dispatcher. The function
35188 returns the number of dispatch groups found. */
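/* Sketch: if the scheduler marked a group boundary after two insns
   while issue_rate is 4 and the next insn is not a branch, the loop
   below emits one nop (the branch slot is deliberately left empty), so
   the hardware dispatcher sees the same boundary the scheduler chose. */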
35190 static int
35191 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
35192 rtx_insn *tail)
35194 rtx_insn *insn, *next_insn;
35195 rtx nop;
35196 int issue_rate;
35197 int can_issue_more;
35198 int group_end;
35199 int group_count = 0;
35201 /* Initialize issue_rate. */
35202 issue_rate = rs6000_issue_rate ();
35203 can_issue_more = issue_rate;
35205 insn = get_next_active_insn (prev_head_insn, tail);
35206 next_insn = get_next_active_insn (insn, tail);
35208 while (insn != NULL_RTX)
35210 can_issue_more =
35211 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
35213 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
35215 if (next_insn == NULL_RTX)
35216 break;
35218 if (group_end)
35220 /* If the scheduler had marked group termination at this location
35221 (between insn and next_insn), and neither insn nor next_insn will
35222 force group termination, pad the group with nops to force group
35223 termination. */
35224 if (can_issue_more
35225 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
35226 && !insn_terminates_group_p (insn, current_group)
35227 && !insn_terminates_group_p (next_insn, previous_group))
35229 if (!is_branch_slot_insn (next_insn))
35230 can_issue_more--;
35232 while (can_issue_more)
35234 nop = gen_nop ();
35235 emit_insn_before (nop, next_insn);
35236 can_issue_more--;
35240 can_issue_more = issue_rate;
35241 group_count++;
35244 insn = next_insn;
35245 next_insn = get_next_active_insn (insn, tail);
35248 return group_count;
35251 /* We're beginning a new block. Initialize data structures as necessary. */
35253 static void
35254 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
35255 int sched_verbose ATTRIBUTE_UNUSED,
35256 int max_ready ATTRIBUTE_UNUSED)
35258 last_scheduled_insn = NULL;
35259 load_store_pendulum = 0;
35260 divide_cnt = 0;
35261 vec_pairing = 0;
35264 /* The following function is called at the end of scheduling BB.
35265 After reload, it inserts nops to enforce insn group bundling. */
35267 static void
35268 rs6000_sched_finish (FILE *dump, int sched_verbose)
35270 int n_groups;
35272 if (sched_verbose)
35273 fprintf (dump, "=== Finishing schedule.\n");
35275 if (reload_completed && rs6000_sched_groups)
35277 /* Do not run sched_finish hook when selective scheduling enabled. */
35278 if (sel_sched_p ())
35279 return;
35281 if (rs6000_sched_insert_nops == sched_finish_none)
35282 return;
35284 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
35285 n_groups = pad_groups (dump, sched_verbose,
35286 current_sched_info->prev_head,
35287 current_sched_info->next_tail);
35288 else
35289 n_groups = redefine_groups (dump, sched_verbose,
35290 current_sched_info->prev_head,
35291 current_sched_info->next_tail);
35293 if (sched_verbose >= 6)
35295 fprintf (dump, "ngroups = %d\n", n_groups);
35296 print_rtl (dump, current_sched_info->prev_head);
35297 fprintf (dump, "Done finish_sched\n");
35302 struct rs6000_sched_context
35304 short cached_can_issue_more;
35305 rtx_insn *last_scheduled_insn;
35306 int load_store_pendulum;
35307 int divide_cnt;
35308 int vec_pairing;
35311 typedef struct rs6000_sched_context rs6000_sched_context_def;
35312 typedef rs6000_sched_context_def *rs6000_sched_context_t;
35314 /* Allocate store for new scheduling context. */
35315 static void *
35316 rs6000_alloc_sched_context (void)
35318 return xmalloc (sizeof (rs6000_sched_context_def));
35321 /* If CLEAN_P is true, initialize _SC with clean data;
35322 otherwise, initialize it from the global context. */
35323 static void
35324 rs6000_init_sched_context (void *_sc, bool clean_p)
35326 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
35328 if (clean_p)
35330 sc->cached_can_issue_more = 0;
35331 sc->last_scheduled_insn = NULL;
35332 sc->load_store_pendulum = 0;
35333 sc->divide_cnt = 0;
35334 sc->vec_pairing = 0;
35336 else
35338 sc->cached_can_issue_more = cached_can_issue_more;
35339 sc->last_scheduled_insn = last_scheduled_insn;
35340 sc->load_store_pendulum = load_store_pendulum;
35341 sc->divide_cnt = divide_cnt;
35342 sc->vec_pairing = vec_pairing;
35346 /* Sets the global scheduling context to the one pointed to by _SC. */
35347 static void
35348 rs6000_set_sched_context (void *_sc)
35350 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
35352 gcc_assert (sc != NULL);
35354 cached_can_issue_more = sc->cached_can_issue_more;
35355 last_scheduled_insn = sc->last_scheduled_insn;
35356 load_store_pendulum = sc->load_store_pendulum;
35357 divide_cnt = sc->divide_cnt;
35358 vec_pairing = sc->vec_pairing;
35361 /* Free _SC. */
35362 static void
35363 rs6000_free_sched_context (void *_sc)
35365 gcc_assert (_sc != NULL);
35367 free (_sc);
35370 static bool
35371 rs6000_sched_can_speculate_insn (rtx_insn *insn)
35373 switch (get_attr_type (insn))
35375 case TYPE_DIV:
35376 case TYPE_SDIV:
35377 case TYPE_DDIV:
35378 case TYPE_VECDIV:
35379 case TYPE_SSQRT:
35380 case TYPE_DSQRT:
35381 return false;
35383 default:
35384 return true;
35388 /* Length in units of the trampoline for entering a nested function. */
35391 rs6000_trampoline_size (void)
35393 int ret = 0;
35395 switch (DEFAULT_ABI)
35397 default:
35398 gcc_unreachable ();
35400 case ABI_AIX:
35401 ret = (TARGET_32BIT) ? 12 : 24;
35402 break;
35404 case ABI_ELFv2:
35405 gcc_assert (!TARGET_32BIT);
35406 ret = 32;
35407 break;
35409 case ABI_DARWIN:
35410 case ABI_V4:
35411 ret = (TARGET_32BIT) ? 40 : 48;
35412 break;
35415 return ret;
35418 /* Emit RTL insns to initialize the variable parts of a trampoline.
35419 FNADDR is an RTX for the address of the function's pure code.
35420 CXT is an RTX for the static chain value for the function. */
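/* Layout sketch for the AIX case below: the trampoline is a 3-word
   function descriptor, with the code address at offset 0, the TOC
   pointer at offset regsize, and the static chain at offset 2*regsize
   (regsize being 4 or 8 bytes). */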
35422 static void
35423 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
35425 int regsize = (TARGET_32BIT) ? 4 : 8;
35426 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
35427 rtx ctx_reg = force_reg (Pmode, cxt);
35428 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
35430 switch (DEFAULT_ABI)
35432 default:
35433 gcc_unreachable ();
35435 /* Under AIX, just build the 3-word function descriptor. */
35436 case ABI_AIX:
35438 rtx fnmem, fn_reg, toc_reg;
35440 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
35441 error ("you cannot take the address of a nested function if you use "
35442 "the -mno-pointers-to-nested-functions option");
35444 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
35445 fn_reg = gen_reg_rtx (Pmode);
35446 toc_reg = gen_reg_rtx (Pmode);
35448 /* Macro to shorten the code expansions below. */
35449 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
35451 m_tramp = replace_equiv_address (m_tramp, addr);
35453 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
35454 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
35455 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
35456 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
35457 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
35459 # undef MEM_PLUS
35461 break;
35463 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
35464 case ABI_ELFv2:
35465 case ABI_DARWIN:
35466 case ABI_V4:
35467 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
35468 LCT_NORMAL, VOIDmode, 4,
35469 addr, Pmode,
35470 GEN_INT (rs6000_trampoline_size ()), SImode,
35471 fnaddr, Pmode,
35472 ctx_reg, Pmode);
35473 break;
35478 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
35479 identifier as an argument, so the front end shouldn't look it up. */
35481 static bool
35482 rs6000_attribute_takes_identifier_p (const_tree attr_id)
35484 return is_attribute_p ("altivec", attr_id);
35487 /* Handle the "altivec" attribute. The attribute may have
35488 arguments as follows:
35490 __attribute__((altivec(vector__)))
35491 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
35492 __attribute__((altivec(bool__))) (always followed by 'unsigned')
35494 and may appear more than once (e.g., 'vector bool char') in a
35495 given declaration. */
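/* For example, 'vector unsigned int' is effectively
   '__attribute__((altivec(vector__))) unsigned int'; the 'v' case
   below maps its SImode element type to unsigned_V4SI_type_node. */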
35497 static tree
35498 rs6000_handle_altivec_attribute (tree *node,
35499 tree name ATTRIBUTE_UNUSED,
35500 tree args,
35501 int flags ATTRIBUTE_UNUSED,
35502 bool *no_add_attrs)
35504 tree type = *node, result = NULL_TREE;
35505 machine_mode mode;
35506 int unsigned_p;
35507 char altivec_type
35508 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
35509 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
35510 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
35511 : '?');
35513 while (POINTER_TYPE_P (type)
35514 || TREE_CODE (type) == FUNCTION_TYPE
35515 || TREE_CODE (type) == METHOD_TYPE
35516 || TREE_CODE (type) == ARRAY_TYPE)
35517 type = TREE_TYPE (type);
35519 mode = TYPE_MODE (type);
35521 /* Check for invalid AltiVec type qualifiers. */
35522 if (type == long_double_type_node)
35523 error ("use of %<long double%> in AltiVec types is invalid");
35524 else if (type == boolean_type_node)
35525 error ("use of boolean types in AltiVec types is invalid");
35526 else if (TREE_CODE (type) == COMPLEX_TYPE)
35527 error ("use of %<complex%> in AltiVec types is invalid");
35528 else if (DECIMAL_FLOAT_MODE_P (mode))
35529 error ("use of decimal floating point types in AltiVec types is invalid");
35530 else if (!TARGET_VSX)
35532 if (type == long_unsigned_type_node || type == long_integer_type_node)
35534 if (TARGET_64BIT)
35535 error ("use of %<long%> in AltiVec types is invalid for "
35536 "64-bit code without -mvsx");
35537 else if (rs6000_warn_altivec_long)
35538 warning (0, "use of %<long%> in AltiVec types is deprecated; "
35539 "use %<int%>");
35541 else if (type == long_long_unsigned_type_node
35542 || type == long_long_integer_type_node)
35543 error ("use of %<long long%> in AltiVec types is invalid without "
35544 "-mvsx");
35545 else if (type == double_type_node)
35546 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
35549 switch (altivec_type)
35551 case 'v':
35552 unsigned_p = TYPE_UNSIGNED (type);
35553 switch (mode)
35555 case TImode:
35556 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
35557 break;
35558 case DImode:
35559 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
35560 break;
35561 case SImode:
35562 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
35563 break;
35564 case HImode:
35565 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
35566 break;
35567 case QImode:
35568 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
35569 break;
35570 case SFmode: result = V4SF_type_node; break;
35571 case DFmode: result = V2DF_type_node; break;
35572 /* If the user says 'vector int bool', we may be handed the 'bool'
35573 attribute _before_ the 'vector' attribute, and so select the
35574 proper type in the 'b' case below. */
35575 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
35576 case V2DImode: case V2DFmode:
35577 result = type;
35578 default: break;
35580 break;
35581 case 'b':
35582 switch (mode)
35584 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
35585 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
35586 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
35587 case QImode: case V16QImode: result = bool_V16QI_type_node;
35588 default: break;
35590 break;
35591 case 'p':
35592 switch (mode)
35594 case V8HImode: result = pixel_V8HI_type_node;
35595 default: break;
35597 default: break;
35600 /* Propagate qualifiers attached to the element type
35601 onto the vector type. */
35602 if (result && result != type && TYPE_QUALS (type))
35603 result = build_qualified_type (result, TYPE_QUALS (type));
35605 *no_add_attrs = true; /* No need to hang on to the attribute. */
35607 if (result)
35608 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
35610 return NULL_TREE;
35613 /* AltiVec defines four built-in scalar types that serve as vector
35614 elements; we must teach the compiler how to mangle them. */
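/* For instance, the 'bool int' element type mangles as "U6__booli"
   per the table below; vector types built from these elements then
   embed that string in their mangled names (a sketch of the effect,
   not an exhaustive description of the mangling). */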
35616 static const char *
35617 rs6000_mangle_type (const_tree type)
35619 type = TYPE_MAIN_VARIANT (type);
35621 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
35622 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
35623 return NULL;
35625 if (type == bool_char_type_node) return "U6__boolc";
35626 if (type == bool_short_type_node) return "U6__bools";
35627 if (type == pixel_type_node) return "u7__pixel";
35628 if (type == bool_int_type_node) return "U6__booli";
35629 if (type == bool_long_type_node) return "U6__booll";
35631 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
35632 "g" for IBM extended double, no matter whether it is long double (using
35633 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
35634 if (TARGET_FLOAT128_TYPE)
35636 if (type == ieee128_float_type_node)
35637 return "U10__float128";
35639 if (type == ibm128_float_type_node)
35640 return "g";
35642 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
35643 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
35646 /* Mangle IBM extended float long double as `g' (__float128) on
35647 powerpc*-linux where long-double-64 previously was the default. */
35648 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
35649 && TARGET_ELF
35650 && TARGET_LONG_DOUBLE_128
35651 && !TARGET_IEEEQUAD)
35652 return "g";
35654 /* For all other types, use normal C++ mangling. */
35655 return NULL;
35658 /* Handle a "longcall" or "shortcall" attribute; arguments as in
35659 struct attribute_spec.handler. */
35661 static tree
35662 rs6000_handle_longcall_attribute (tree *node, tree name,
35663 tree args ATTRIBUTE_UNUSED,
35664 int flags ATTRIBUTE_UNUSED,
35665 bool *no_add_attrs)
35667 if (TREE_CODE (*node) != FUNCTION_TYPE
35668 && TREE_CODE (*node) != FIELD_DECL
35669 && TREE_CODE (*node) != TYPE_DECL)
35671 warning (OPT_Wattributes, "%qE attribute only applies to functions",
35672 name);
35673 *no_add_attrs = true;
35676 return NULL_TREE;
35679 /* Set longcall attributes on all functions declared when
35680 rs6000_default_long_calls is true. */
35681 static void
35682 rs6000_set_default_type_attributes (tree type)
35684 if (rs6000_default_long_calls
35685 && (TREE_CODE (type) == FUNCTION_TYPE
35686 || TREE_CODE (type) == METHOD_TYPE))
35687 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
35688 NULL_TREE,
35689 TYPE_ATTRIBUTES (type));
35691 #if TARGET_MACHO
35692 darwin_set_default_type_attributes (type);
35693 #endif
35696 /* Return a reference suitable for calling a function with the
35697 longcall attribute. */
35700 rs6000_longcall_ref (rtx call_ref)
35702 const char *call_name;
35703 tree node;
35705 if (GET_CODE (call_ref) != SYMBOL_REF)
35706 return call_ref;
35708 /* System V adds '.' to the internal name, so skip any leading dots. */
35709 call_name = XSTR (call_ref, 0);
35710 if (*call_name == '.')
35712 while (*call_name == '.')
35713 call_name++;
35715 node = get_identifier (call_name);
35716 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
35719 return force_reg (Pmode, call_ref);
35722 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
35723 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
35724 #endif
35726 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
35727 struct attribute_spec.handler. */
35728 static tree
35729 rs6000_handle_struct_attribute (tree *node, tree name,
35730 tree args ATTRIBUTE_UNUSED,
35731 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
35733 tree *type = NULL;
35734 if (DECL_P (*node))
35736 if (TREE_CODE (*node) == TYPE_DECL)
35737 type = &TREE_TYPE (*node);
35739 else
35740 type = node;
35742 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
35743 || TREE_CODE (*type) == UNION_TYPE)))
35745 warning (OPT_Wattributes, "%qE attribute ignored", name);
35746 *no_add_attrs = true;
35749 else if ((is_attribute_p ("ms_struct", name)
35750 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
35751 || ((is_attribute_p ("gcc_struct", name)
35752 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
35754 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
35755 name);
35756 *no_add_attrs = true;
35759 return NULL_TREE;
35762 static bool
35763 rs6000_ms_bitfield_layout_p (const_tree record_type)
35765 return ((TARGET_USE_MS_BITFIELD_LAYOUT
35766 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
35767 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
35770 #ifdef USING_ELFOS_H
35772 /* A get_unnamed_section callback, used for switching to toc_section. */
35774 static void
35775 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
35777 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
35778 && TARGET_MINIMAL_TOC)
35780 if (!toc_initialized)
35782 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
35783 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35784 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
35785 fprintf (asm_out_file, "\t.tc ");
35786 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
35787 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35788 fprintf (asm_out_file, "\n");
35790 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35791 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35792 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35793 fprintf (asm_out_file, " = .+32768\n");
35794 toc_initialized = 1;
35796 else
35797 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35799 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
35801 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
35802 if (!toc_initialized)
35804 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35805 toc_initialized = 1;
35808 else
35810 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35811 if (!toc_initialized)
35813 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35814 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35815 fprintf (asm_out_file, " = .+32768\n");
35816 toc_initialized = 1;
35821 /* Implement TARGET_ASM_INIT_SECTIONS. */
35823 static void
35824 rs6000_elf_asm_init_sections (void)
35826 toc_section
35827 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
35829 sdata2_section
35830 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
35831 SDATA2_SECTION_ASM_OP);
35834 /* Implement TARGET_SELECT_RTX_SECTION. */
35836 static section *
35837 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
35838 unsigned HOST_WIDE_INT align)
35840 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
35841 return toc_section;
35842 else
35843 return default_elf_select_rtx_section (mode, x, align);
35846 /* For a SYMBOL_REF, set generic flags and then perform some
35847 target-specific processing.
35849 When the AIX ABI is requested on a non-AIX system, replace the
35850 function name with the real name (with a leading .) rather than the
35851 function descriptor name. This saves a lot of overriding code to
35852 read the prefixes. */
35854 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
35855 static void
35856 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
35858 default_encode_section_info (decl, rtl, first);
35860 if (first
35861 && TREE_CODE (decl) == FUNCTION_DECL
35862 && !TARGET_AIX
35863 && DEFAULT_ABI == ABI_AIX)
35865 rtx sym_ref = XEXP (rtl, 0);
35866 size_t len = strlen (XSTR (sym_ref, 0));
35867 char *str = XALLOCAVEC (char, len + 2);
35868 str[0] = '.';
35869 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
35870 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
35874 static inline bool
35875 compare_section_name (const char *section, const char *templ)
35877 int len;
35879 len = strlen (templ);
35880 return (strncmp (section, templ, len) == 0
35881 && (section[len] == 0 || section[len] == '.'));
35884 bool
35885 rs6000_elf_in_small_data_p (const_tree decl)
35887 if (rs6000_sdata == SDATA_NONE)
35888 return false;
35890 /* We want to merge strings, so we never consider them small data. */
35891 if (TREE_CODE (decl) == STRING_CST)
35892 return false;
35894 /* Functions are never in the small data area. */
35895 if (TREE_CODE (decl) == FUNCTION_DECL)
35896 return false;
35898 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
35900 const char *section = DECL_SECTION_NAME (decl);
35901 if (compare_section_name (section, ".sdata")
35902 || compare_section_name (section, ".sdata2")
35903 || compare_section_name (section, ".gnu.linkonce.s")
35904 || compare_section_name (section, ".sbss")
35905 || compare_section_name (section, ".sbss2")
35906 || compare_section_name (section, ".gnu.linkonce.sb")
35907 || strcmp (section, ".PPC.EMB.sdata0") == 0
35908 || strcmp (section, ".PPC.EMB.sbss0") == 0)
35909 return true;
35911 else
35913 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
35915 if (size > 0
35916 && size <= g_switch_value
35917 /* If it's not public, and we're not going to reference it there,
35918 there's no need to put it in the small data section. */
35919 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
35920 return true;
35923 return false;
35926 #endif /* USING_ELFOS_H */
35928 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
35930 static bool
35931 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
35933 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
35936 /* Do not place thread-local symbols refs in the object blocks. */
35938 static bool
35939 rs6000_use_blocks_for_decl_p (const_tree decl)
35941 return !DECL_THREAD_LOCAL_P (decl);
35944 /* Return a REG that occurs in ADDR with coefficient 1.
35945 ADDR can be effectively incremented by incrementing REG.
35947 r0 is special and we must not select it as an address
35948 register by this routine since our caller will try to
35949 increment the returned register via an "la" instruction. */
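/* Illustrative cases (hypothetical registers): for
   (plus (reg r9) (const_int 8)) this returns r9, and for
   (plus (plus (reg r9) (reg r10)) (const_int 8)) the loop strips the
   constant first and then picks r9. */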
35952 find_addr_reg (rtx addr)
35954 while (GET_CODE (addr) == PLUS)
35956 if (GET_CODE (XEXP (addr, 0)) == REG
35957 && REGNO (XEXP (addr, 0)) != 0)
35958 addr = XEXP (addr, 0);
35959 else if (GET_CODE (XEXP (addr, 1)) == REG
35960 && REGNO (XEXP (addr, 1)) != 0)
35961 addr = XEXP (addr, 1);
35962 else if (CONSTANT_P (XEXP (addr, 0)))
35963 addr = XEXP (addr, 1);
35964 else if (CONSTANT_P (XEXP (addr, 1)))
35965 addr = XEXP (addr, 0);
35966 else
35967 gcc_unreachable ();
35969 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
35970 return addr;
35973 void
35974 rs6000_fatal_bad_address (rtx op)
35976 fatal_insn ("bad address", op);
35979 #if TARGET_MACHO
35981 typedef struct branch_island_d {
35982 tree function_name;
35983 tree label_name;
35984 int line_number;
35985 } branch_island;
35988 static vec<branch_island, va_gc> *branch_islands;
35990 /* Remember to generate a branch island for far calls to the given
35991 function. */
35993 static void
35994 add_compiler_branch_island (tree label_name, tree function_name,
35995 int line_number)
35997 branch_island bi = {function_name, label_name, line_number};
35998 vec_safe_push (branch_islands, bi);
36001 /* Generate far-jump branch islands for everything recorded in
36002 branch_islands. Invoked immediately after the last instruction of
36003 the epilogue has been emitted; the branch islands must be appended
36004 to, and contiguous with, the function body. Mach-O stubs are
36005 generated in machopic_output_stub(). */
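/* Shape of a non-PIC island (a sketch; the label and symbol names are
   hypothetical):
     L42$island:
       lis r12,hi16(_foo)
       ori r12,r12,lo16(_foo)
       mtctr r12
       bctr  */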
36007 static void
36008 macho_branch_islands (void)
36010 char tmp_buf[512];
36012 while (!vec_safe_is_empty (branch_islands))
36014 branch_island *bi = &branch_islands->last ();
36015 const char *label = IDENTIFIER_POINTER (bi->label_name);
36016 const char *name = IDENTIFIER_POINTER (bi->function_name);
36017 char name_buf[512];
36018 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
36019 if (name[0] == '*' || name[0] == '&')
36020 strcpy (name_buf, name+1);
36021 else
36023 name_buf[0] = '_';
36024 strcpy (name_buf+1, name);
36026 strcpy (tmp_buf, "\n");
36027 strcat (tmp_buf, label);
36028 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
36029 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
36030 dbxout_stabd (N_SLINE, bi->line_number);
36031 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
36032 if (flag_pic)
36034 if (TARGET_LINK_STACK)
36036 char name[32];
36037 get_ppc476_thunk_name (name);
36038 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
36039 strcat (tmp_buf, name);
36040 strcat (tmp_buf, "\n");
36041 strcat (tmp_buf, label);
36042 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
36044 else
36046 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
36047 strcat (tmp_buf, label);
36048 strcat (tmp_buf, "_pic\n");
36049 strcat (tmp_buf, label);
36050 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
36053 strcat (tmp_buf, "\taddis r11,r11,ha16(");
36054 strcat (tmp_buf, name_buf);
36055 strcat (tmp_buf, " - ");
36056 strcat (tmp_buf, label);
36057 strcat (tmp_buf, "_pic)\n");
36059 strcat (tmp_buf, "\tmtlr r0\n");
36061 strcat (tmp_buf, "\taddi r12,r11,lo16(");
36062 strcat (tmp_buf, name_buf);
36063 strcat (tmp_buf, " - ");
36064 strcat (tmp_buf, label);
36065 strcat (tmp_buf, "_pic)\n");
36067 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
36069 else
36071 strcat (tmp_buf, ":\nlis r12,hi16(");
36072 strcat (tmp_buf, name_buf);
36073 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
36074 strcat (tmp_buf, name_buf);
36075 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
36077 output_asm_insn (tmp_buf, 0);
36078 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
36079 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
36080 dbxout_stabd (N_SLINE, bi->line_number);
36081 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
36082 branch_islands->pop ();
36086 /* NO_PREVIOUS_DEF checks the list of branch islands to see whether
36087 the function name is already recorded there. */
36089 static int
36090 no_previous_def (tree function_name)
36092 branch_island *bi;
36093 unsigned ix;
36095 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
36096 if (function_name == bi->function_name)
36097 return 0;
36098 return 1;
36101 /* GET_PREV_LABEL gets the label name from the previous definition of
36102 the function. */
36104 static tree
36105 get_prev_label (tree function_name)
36107 branch_island *bi;
36108 unsigned ix;
36110 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
36111 if (function_name == bi->function_name)
36112 return bi->label_name;
36113 return NULL_TREE;
36116 /* INSN is either a function call or a millicode call. It may have an
36117 unconditional jump in its delay slot.
36119 CALL_DEST is the routine we are calling. */
36121 char *
36122 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
36123 int cookie_operand_number)
36125 static char buf[256];
36126 if (darwin_emit_branch_islands
36127 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
36128 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
36130 tree labelname;
36131 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
36133 if (no_previous_def (funname))
36135 rtx label_rtx = gen_label_rtx ();
36136 char *label_buf, temp_buf[256];
36137 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
36138 CODE_LABEL_NUMBER (label_rtx));
36139 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
36140 labelname = get_identifier (label_buf);
36141 add_compiler_branch_island (labelname, funname, insn_line (insn));
36143 else
36144 labelname = get_prev_label (funname);
36146 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
36147 instruction will reach 'foo', otherwise link as 'bl L42'".
36148 "L42" should be a 'branch island', that will do a far jump to
36149 'foo'. Branch islands are generated in
36150 macho_branch_islands(). */
36151 sprintf (buf, "jbsr %%z%d,%.246s",
36152 dest_operand_number, IDENTIFIER_POINTER (labelname));
36154 else
36155 sprintf (buf, "bl %%z%d", dest_operand_number);
36156 return buf;
36159 /* Generate PIC and indirect symbol stubs. */
36161 void
36162 machopic_output_stub (FILE *file, const char *symb, const char *stub)
36164 unsigned int length;
36165 char *symbol_name, *lazy_ptr_name;
36166 char *local_label_0;
36167 static int label = 0;
36169 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
36170 symb = (*targetm.strip_name_encoding) (symb);
36173 length = strlen (symb);
36174 symbol_name = XALLOCAVEC (char, length + 32);
36175 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
36177 lazy_ptr_name = XALLOCAVEC (char, length + 32);
36178 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
36180 if (flag_pic == 2)
36181 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
36182 else
36183 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
36185 if (flag_pic == 2)
36187 fprintf (file, "\t.align 5\n");
36189 fprintf (file, "%s:\n", stub);
36190 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36192 label++;
36193 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
36194 sprintf (local_label_0, "\"L%011d$spb\"", label);
36196 fprintf (file, "\tmflr r0\n");
36197 if (TARGET_LINK_STACK)
36199 char name[32];
36200 get_ppc476_thunk_name (name);
36201 fprintf (file, "\tbl %s\n", name);
36202 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
36204 else
36206 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
36207 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
36209 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
36210 lazy_ptr_name, local_label_0);
36211 fprintf (file, "\tmtlr r0\n");
36212 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
36213 (TARGET_64BIT ? "ldu" : "lwzu"),
36214 lazy_ptr_name, local_label_0);
36215 fprintf (file, "\tmtctr r12\n");
36216 fprintf (file, "\tbctr\n");
36218 else
36220 fprintf (file, "\t.align 4\n");
36222 fprintf (file, "%s:\n", stub);
36223 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36225 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
36226 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
36227 (TARGET_64BIT ? "ldu" : "lwzu"),
36228 lazy_ptr_name);
36229 fprintf (file, "\tmtctr r12\n");
36230 fprintf (file, "\tbctr\n");
36233 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
36234 fprintf (file, "%s:\n", lazy_ptr_name);
36235 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
36236 fprintf (file, "%sdyld_stub_binding_helper\n",
36237 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
36240 /* Legitimize PIC addresses. If the address is already
36241 position-independent, we return ORIG. Newly generated
36242 position-independent addresses go into a reg. This is REG if non
36243 zero, otherwise we allocate register(s) as necessary. */
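/* Common-case sketch: for a (const (plus (symbol_ref X) (const_int 12)))
   operand, the code below legitimizes the symbol into a register and,
   since 12 satisfies SMALL_INT, folds the offset back in with
   plus_constant; offsets outside that range are forced into a register
   instead. */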
36245 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
36248 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
36249 rtx reg)
36251 rtx base, offset;
36253 if (reg == NULL && ! reload_in_progress && ! reload_completed)
36254 reg = gen_reg_rtx (Pmode);
36256 if (GET_CODE (orig) == CONST)
36258 rtx reg_temp;
36260 if (GET_CODE (XEXP (orig, 0)) == PLUS
36261 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
36262 return orig;
36264 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
36266 /* Use a different reg for the intermediate value, as
36267 it will be marked UNCHANGING. */
36268 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
36269 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
36270 Pmode, reg_temp);
36271 offset =
36272 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
36273 Pmode, reg);
36275 if (GET_CODE (offset) == CONST_INT)
36277 if (SMALL_INT (offset))
36278 return plus_constant (Pmode, base, INTVAL (offset));
36279 else if (! reload_in_progress && ! reload_completed)
36280 offset = force_reg (Pmode, offset);
36281 else
36283 rtx mem = force_const_mem (Pmode, orig);
36284 return machopic_legitimize_pic_address (mem, Pmode, reg);
36287 return gen_rtx_PLUS (Pmode, base, offset);
36290 /* Fall back on generic machopic code. */
36291 return machopic_legitimize_pic_address (orig, mode, reg);
36294 /* Output a .machine directive for the Darwin assembler, and call
36295 the generic start_file routine. */
36297 static void
36298 rs6000_darwin_file_start (void)
36300 static const struct
36302 const char *arg;
36303 const char *name;
36304 HOST_WIDE_INT if_set;
36305 } mapping[] = {
36306 { "ppc64", "ppc64", MASK_64BIT },
36307 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
36308 { "power4", "ppc970", 0 },
36309 { "G5", "ppc970", 0 },
36310 { "7450", "ppc7450", 0 },
36311 { "7400", "ppc7400", MASK_ALTIVEC },
36312 { "G4", "ppc7400", 0 },
36313 { "750", "ppc750", 0 },
36314 { "740", "ppc750", 0 },
36315 { "G3", "ppc750", 0 },
36316 { "604e", "ppc604e", 0 },
36317 { "604", "ppc604", 0 },
36318 { "603e", "ppc603", 0 },
36319 { "603", "ppc603", 0 },
36320 { "601", "ppc601", 0 },
36321 { NULL, "ppc", 0 } };
36322 const char *cpu_id = "";
36323 size_t i;
36325 rs6000_file_start ();
36326 darwin_file_start ();
36328 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
36330 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
36331 cpu_id = rs6000_default_cpu;
36333 if (global_options_set.x_rs6000_cpu_index)
36334 cpu_id = processor_target_table[rs6000_cpu_index].name;
36336 /* Look through the mapping array. Pick the first name that either
36337 matches the argument, has a bit set in IF_SET that is also set
36338 in the target flags, or has a NULL name. */
36340 i = 0;
36341 while (mapping[i].arg != NULL
36342 && strcmp (mapping[i].arg, cpu_id) != 0
36343 && (mapping[i].if_set & rs6000_isa_flags) == 0)
36344 i++;
36346 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
36349 #endif /* TARGET_MACHO */
36351 #if TARGET_ELF
36352 static int
36353 rs6000_elf_reloc_rw_mask (void)
36355 if (flag_pic)
36356 return 3;
36357 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
36358 return 2;
36359 else
36360 return 0;
36363 /* Record an element in the table of global constructors. SYMBOL is
36364 a SYMBOL_REF of the function to be called; PRIORITY is a number
36365 between 0 and MAX_INIT_PRIORITY.
36367 This differs from default_named_section_asm_out_constructor in
36368 that we have special handling for -mrelocatable. */
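/* For example, with MAX_INIT_PRIORITY == 65535 a constructor of
   priority 65530 is placed in section ".ctors.00005", so the linker's
   increasing sort yields the inverted order described at the sprintf
   below. */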
36370 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
36371 static void
36372 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
36374 const char *section = ".ctors";
36375 char buf[18];
36377 if (priority != DEFAULT_INIT_PRIORITY)
36379 sprintf (buf, ".ctors.%.5u",
36380 /* Invert the numbering so the linker puts us in the proper
36381 order; constructors are run from right to left, and the
36382 linker sorts in increasing order. */
36383 MAX_INIT_PRIORITY - priority);
36384 section = buf;
36387 switch_to_section (get_section (section, SECTION_WRITE, NULL));
36388 assemble_align (POINTER_SIZE);
36390 if (DEFAULT_ABI == ABI_V4
36391 && (TARGET_RELOCATABLE || flag_pic > 1))
36393 fputs ("\t.long (", asm_out_file);
36394 output_addr_const (asm_out_file, symbol);
36395 fputs (")@fixup\n", asm_out_file);
36397 else
36398 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
36401 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
36402 static void
36403 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
36405 const char *section = ".dtors";
36406 char buf[18];
36408 if (priority != DEFAULT_INIT_PRIORITY)
36410 sprintf (buf, ".dtors.%.5u",
36411 /* Invert the numbering so the linker puts us in the proper
36412 order; constructors are run from right to left, and the
36413 linker sorts in increasing order. */
36414 MAX_INIT_PRIORITY - priority);
36415 section = buf;
36418 switch_to_section (get_section (section, SECTION_WRITE, NULL));
36419 assemble_align (POINTER_SIZE);
36421 if (DEFAULT_ABI == ABI_V4
36422 && (TARGET_RELOCATABLE || flag_pic > 1))
36424 fputs ("\t.long (", asm_out_file);
36425 output_addr_const (asm_out_file, symbol);
36426 fputs (")@fixup\n", asm_out_file);
36428 else
36429 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
36432 void
36433 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
36435 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
36437 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
36438 ASM_OUTPUT_LABEL (file, name);
36439 fputs (DOUBLE_INT_ASM_OP, file);
36440 rs6000_output_function_entry (file, name);
36441 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
36442 if (DOT_SYMBOLS)
36444 fputs ("\t.size\t", file);
36445 assemble_name (file, name);
36446 fputs (",24\n\t.type\t.", file);
36447 assemble_name (file, name);
36448 fputs (",@function\n", file);
36449 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
36451 fputs ("\t.globl\t.", file);
36452 assemble_name (file, name);
36453 putc ('\n', file);
36456 else
36457 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
36458 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
36459 rs6000_output_function_entry (file, name);
36460 fputs (":\n", file);
36461 return;
36464 if (DEFAULT_ABI == ABI_V4
36465 && (TARGET_RELOCATABLE || flag_pic > 1)
36466 && !TARGET_SECURE_PLT
36467 && (!constant_pool_empty_p () || crtl->profile)
36468 && uses_TOC ())
36470 char buf[256];
36472 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
36474 fprintf (file, "\t.long ");
36475 assemble_name (file, toc_label_name);
36476 need_toc_init = 1;
36477 putc ('-', file);
36478 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
36479 assemble_name (file, buf);
36480 putc ('\n', file);
36483 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
36484 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
36486 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
36488 char buf[256];
36490 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
36492 fprintf (file, "\t.quad .TOC.-");
36493 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
36494 assemble_name (file, buf);
36495 putc ('\n', file);
36498 if (DEFAULT_ABI == ABI_AIX)
36500 const char *desc_name, *orig_name;
36502 orig_name = (*targetm.strip_name_encoding) (name);
36503 desc_name = orig_name;
36504 while (*desc_name == '.')
36505 desc_name++;
36507 if (TREE_PUBLIC (decl))
36508 fprintf (file, "\t.globl %s\n", desc_name);
36510 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
36511 fprintf (file, "%s:\n", desc_name);
36512 fprintf (file, "\t.long %s\n", orig_name);
36513 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
36514 fputs ("\t.long 0\n", file);
36515 fprintf (file, "\t.previous\n");
36517 ASM_OUTPUT_LABEL (file, name);
36520 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
36521 static void
36522 rs6000_elf_file_end (void)
36524 #ifdef HAVE_AS_GNU_ATTRIBUTE
36525 /* ??? The value emitted depends on options active at file end.
36526 Assume anyone using #pragma or attributes that might change
36527 options knows what they are doing. */
36528 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
36529 && rs6000_passes_float)
36531 int fp;
36533 if (TARGET_DF_FPR | TARGET_DF_SPE)
36534 fp = 1;
36535 else if (TARGET_SF_FPR | TARGET_SF_SPE)
36536 fp = 3;
36537 else
36538 fp = 2;
36539 if (rs6000_passes_long_double)
36541 if (!TARGET_LONG_DOUBLE_128)
36542 fp |= 2 * 4;
36543 else if (TARGET_IEEEQUAD)
36544 fp |= 3 * 4;
36545 else
36546 fp |= 1 * 4;
36548 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
36550 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
36552 if (rs6000_passes_vector)
36553 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
36554 (TARGET_ALTIVEC_ABI ? 2
36555 : TARGET_SPE_ABI ? 3
36556 : 1));
36557 if (rs6000_returns_struct)
36558 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
36559 aix_struct_return ? 2 : 1);
36561 #endif
36562 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
36563 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
36564 file_end_indicate_exec_stack ();
36565 #endif
36567 if (flag_split_stack)
36568 file_end_indicate_split_stack ();
36570 if (cpu_builtin_p)
36572 /* We have expanded a CPU builtin, so we need to emit a reference to
36573 the special symbol that LIBC uses to declare that it supports the
36574 AT_PLATFORM and AT_HWCAP/AT_HWCAP2-in-the-TCB feature. */
36575 switch_to_section (data_section);
36576 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
36577 fprintf (asm_out_file, "\t%s %s\n",
36578 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
36581 #endif
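/* Worked example of the FP attribute encoding above (editor's note):
   the low two bits of FP describe scalar float support (1 = double
   precision in FPRs, 2 = soft float, 3 = single precision only) and
   the next two bits describe long double (4 = 128-bit IBM extended,
   8 = 64-bit, 12 = IEEE quad), so a hard-float target using 128-bit
   IBM long double emits ".gnu_attribute 4, 5", i.e. 1 | 1*4.  */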
36583 #if TARGET_XCOFF
36585 #ifndef HAVE_XCOFF_DWARF_EXTRAS
36586 #define HAVE_XCOFF_DWARF_EXTRAS 0
36587 #endif
36589 static enum unwind_info_type
36590 rs6000_xcoff_debug_unwind_info (void)
36592 return UI_NONE;
36595 static void
36596 rs6000_xcoff_asm_output_anchor (rtx symbol)
36598 char buffer[100];
36600 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
36601 SYMBOL_REF_BLOCK_OFFSET (symbol));
36602 fprintf (asm_out_file, "%s", SET_ASM_OP);
36603 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
36604 fprintf (asm_out_file, ",");
36605 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
36606 fprintf (asm_out_file, "\n");
36609 static void
36610 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
36612 fputs (GLOBAL_ASM_OP, stream);
36613 RS6000_OUTPUT_BASENAME (stream, name);
36614 putc ('\n', stream);
36617 /* A get_unnamed_section callback, used for read-only sections.
36618 DIRECTIVE points to the section string variable. */
36620 static void
36621 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
36623 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
36624 *(const char *const *) directive,
36625 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36628 /* Likewise for read-write sections. */
36630 static void
36631 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
36633 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
36634 *(const char *const *) directive,
36635 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36638 static void
36639 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
36641 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
36642 *(const char *const *) directive,
36643 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
36646 /* A get_unnamed_section callback, used for switching to toc_section. */
36648 static void
36649 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
36651 if (TARGET_MINIMAL_TOC)
36653 /* toc_section is always selected at least once from
36654 rs6000_xcoff_file_start, so this is guaranteed to be
36655 defined exactly once in each file. */
36656 if (!toc_initialized)
36658 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
36659 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
36660 toc_initialized = 1;
36662 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
36663 (TARGET_32BIT ? "" : ",3"));
36665 else
36666 fputs ("\t.toc\n", asm_out_file);
36669 /* Implement TARGET_ASM_INIT_SECTIONS. */
36671 static void
36672 rs6000_xcoff_asm_init_sections (void)
36674 read_only_data_section
36675 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
36676 &xcoff_read_only_section_name);
36678 private_data_section
36679 = get_unnamed_section (SECTION_WRITE,
36680 rs6000_xcoff_output_readwrite_section_asm_op,
36681 &xcoff_private_data_section_name);
36683 tls_data_section
36684 = get_unnamed_section (SECTION_TLS,
36685 rs6000_xcoff_output_tls_section_asm_op,
36686 &xcoff_tls_data_section_name);
36688 tls_private_data_section
36689 = get_unnamed_section (SECTION_TLS,
36690 rs6000_xcoff_output_tls_section_asm_op,
36691 &xcoff_private_data_section_name);
36693 read_only_private_data_section
36694 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
36695 &xcoff_private_data_section_name);
36697 toc_section
36698 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
36700 readonly_data_section = read_only_data_section;
36703 static int
36704 rs6000_xcoff_reloc_rw_mask (void)
36706 return 3;
36709 static void
36710 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
36711 tree decl ATTRIBUTE_UNUSED)
36713 int smclass;
36714 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
36716 if (flags & SECTION_EXCLUDE)
36717 smclass = 4;
36718 else if (flags & SECTION_DEBUG)
36720 fprintf (asm_out_file, "\t.dwsect %s\n", name);
36721 return;
36723 else if (flags & SECTION_CODE)
36724 smclass = 0;
36725 else if (flags & SECTION_TLS)
36726 smclass = 3;
36727 else if (flags & SECTION_WRITE)
36728 smclass = 2;
36729 else
36730 smclass = 1;
36732 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
36733 (flags & SECTION_CODE) ? "." : "",
36734 name, suffix[smclass], flags & SECTION_ENTSIZE);
36737 #define IN_NAMED_SECTION(DECL) \
36738 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
36739 && DECL_SECTION_NAME (DECL) != NULL)
36741 static section *
36742 rs6000_xcoff_select_section (tree decl, int reloc,
36743 unsigned HOST_WIDE_INT align)
36745 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
36746 named section. */
36747 if (align > BIGGEST_ALIGNMENT)
36749 resolve_unique_section (decl, reloc, true);
36750 if (IN_NAMED_SECTION (decl))
36751 return get_named_section (decl, NULL, reloc);
36754 if (decl_readonly_section (decl, reloc))
36756 if (TREE_PUBLIC (decl))
36757 return read_only_data_section;
36758 else
36759 return read_only_private_data_section;
36761 else
36763 #if HAVE_AS_TLS
36764 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
36766 if (TREE_PUBLIC (decl))
36767 return tls_data_section;
36768 else if (bss_initializer_p (decl))
36770 /* Convert to COMMON to emit in BSS. */
36771 DECL_COMMON (decl) = 1;
36772 return tls_comm_section;
36774 else
36775 return tls_private_data_section;
36777 else
36778 #endif
36779 if (TREE_PUBLIC (decl))
36780 return data_section;
36781 else
36782 return private_data_section;
36786 static void
36787 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
36789 const char *name;
36791 /* Use select_section for private data and uninitialized data with
36792 alignment <= BIGGEST_ALIGNMENT. */
36793 if (!TREE_PUBLIC (decl)
36794 || DECL_COMMON (decl)
36795 || (DECL_INITIAL (decl) == NULL_TREE
36796 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
36797 || DECL_INITIAL (decl) == error_mark_node
36798 || (flag_zero_initialized_in_bss
36799 && initializer_zerop (DECL_INITIAL (decl))))
36800 return;
36802 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
36803 name = (*targetm.strip_name_encoding) (name);
36804 set_decl_section_name (decl, name);
36807 /* Select the section for a constant in the constant pool.
36809 On RS/6000, all constants are in the private read-only data area.
36810 However, if this is being placed in the TOC it must be output as a
36811 toc entry. */
36813 static section *
36814 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
36815 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
36817 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
36818 return toc_section;
36819 else
36820 return read_only_private_data_section;
36823 /* Remove any trailing [DS] or the like from the symbol name. */
36825 static const char *
36826 rs6000_xcoff_strip_name_encoding (const char *name)
36828 size_t len;
36829 if (*name == '*')
36830 name++;
36831 len = strlen (name);
36832 if (name[len - 1] == ']')
36833 return ggc_alloc_string (name, len - 4);
36834 else
36835 return name;
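/* Example (editor's note): for "*foo[DS]" the leading '*' is skipped
   and the four-character mapping class "[DS]" is dropped, so "foo" is
   returned; a name without a trailing ']' is returned unchanged past
   any leading '*'.  */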
36838 /* Section attributes. AIX is always PIC. */
36840 static unsigned int
36841 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
36843 unsigned int align;
36844 unsigned int flags = default_section_type_flags (decl, name, reloc);
36846 /* Align to at least UNIT size. */
36847 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
36848 align = MIN_UNITS_PER_WORD;
36849 else
36850 /* Increase alignment of large objects if not already stricter. */
36851 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
36852 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
36853 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
36855 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
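/* Example (editor's note, assuming a 32-bit target where
   MIN_UNITS_PER_WORD is 4): a 16-byte decl aligned to 8 bytes gives
   align = 8, so exact_log2 (8) = 3 is stored in the low
   SECTION_ENTSIZE bits of the returned flags, and
   rs6000_xcoff_asm_named_section later prints exactly those bits as
   the csect alignment operand.  */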
36858 /* Output at beginning of assembler file.
36860 Initialize the section names for the RS/6000 at this point.
36862 Specify filename, including full path, to assembler.
36864 We want to go into the TOC section so at least one .toc will be emitted.
36865 Also, in order to output proper .bs/.es pairs, we need at least one static
36866 [RW] section emitted.
36868 Finally, declare mcount when profiling to make the assembler happy. */
36870 static void
36871 rs6000_xcoff_file_start (void)
36873 rs6000_gen_section_name (&xcoff_bss_section_name,
36874 main_input_filename, ".bss_");
36875 rs6000_gen_section_name (&xcoff_private_data_section_name,
36876 main_input_filename, ".rw_");
36877 rs6000_gen_section_name (&xcoff_read_only_section_name,
36878 main_input_filename, ".ro_");
36879 rs6000_gen_section_name (&xcoff_tls_data_section_name,
36880 main_input_filename, ".tls_");
36881 rs6000_gen_section_name (&xcoff_tbss_section_name,
36882 main_input_filename, ".tbss_[UL]");
36884 fputs ("\t.file\t", asm_out_file);
36885 output_quoted_string (asm_out_file, main_input_filename);
36886 fputc ('\n', asm_out_file);
36887 if (write_symbols != NO_DEBUG)
36888 switch_to_section (private_data_section);
36889 switch_to_section (toc_section);
36890 switch_to_section (text_section);
36891 if (profile_flag)
36892 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
36893 rs6000_file_start ();
36896 /* Output at end of assembler file.
36897 On the RS/6000, referencing data should automatically pull in text. */
36899 static void
36900 rs6000_xcoff_file_end (void)
36902 switch_to_section (text_section);
36903 fputs ("_section_.text:\n", asm_out_file);
36904 switch_to_section (data_section);
36905 fputs (TARGET_32BIT
36906 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
36907 asm_out_file);
36910 struct declare_alias_data
36912 FILE *file;
36913 bool function_descriptor;
36916 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
36918 static bool
36919 rs6000_declare_alias (struct symtab_node *n, void *d)
36921 struct declare_alias_data *data = (struct declare_alias_data *)d;
36922 /* The main symbol is output specially, because the varasm machinery does
36923 part of the job for us - we do not need to emit .globl/.lglobl and such. */
36924 if (!n->alias || n->weakref)
36925 return false;
36927 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
36928 return false;
36930 /* Prevent assemble_alias from trying to use the .set pseudo operation,
36931 which does not behave as expected by the middle-end. */
36932 TREE_ASM_WRITTEN (n->decl) = true;
36934 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
36935 char *buffer = (char *) alloca (strlen (name) + 2);
36936 char *p;
36937 int dollar_inside = 0;
36939 strcpy (buffer, name);
36940 p = strchr (buffer, '$');
36941 while (p) {
36942 *p = '_';
36943 dollar_inside++;
36944 p = strchr (p + 1, '$');
36946 if (TREE_PUBLIC (n->decl))
36948 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
36950 if (dollar_inside) {
36951 if (data->function_descriptor)
36952 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36953 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36955 if (data->function_descriptor)
36957 fputs ("\t.globl .", data->file);
36958 RS6000_OUTPUT_BASENAME (data->file, buffer);
36959 putc ('\n', data->file);
36961 fputs ("\t.globl ", data->file);
36962 RS6000_OUTPUT_BASENAME (data->file, buffer);
36963 putc ('\n', data->file);
36965 #ifdef ASM_WEAKEN_DECL
36966 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
36967 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
36968 #endif
36970 else
36972 if (dollar_inside)
36974 if (data->function_descriptor)
36975 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36976 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36978 if (data->function_descriptor)
36980 fputs ("\t.lglobl .", data->file);
36981 RS6000_OUTPUT_BASENAME (data->file, buffer);
36982 putc ('\n', data->file);
36984 fputs ("\t.lglobl ", data->file);
36985 RS6000_OUTPUT_BASENAME (data->file, buffer);
36986 putc ('\n', data->file);
36988 if (data->function_descriptor)
36989 fputs (".", data->file);
36990 RS6000_OUTPUT_BASENAME (data->file, buffer);
36991 fputs (":\n", data->file);
36992 return false;
36996 #ifdef HAVE_GAS_HIDDEN
36997 /* Helper function to calculate the visibility of a DECL
36998 and return the value as a const string. */
37000 static const char *
37001 rs6000_xcoff_visibility (tree decl)
37003 static const char * const visibility_types[] = {
37004 "", ",protected", ",hidden", ",internal"
37007 enum symbol_visibility vis = DECL_VISIBILITY (decl);
37009 if (TREE_CODE (decl) == FUNCTION_DECL
37010 && cgraph_node::get (decl)
37011 && cgraph_node::get (decl)->instrumentation_clone
37012 && cgraph_node::get (decl)->instrumented_version)
37013 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
37015 return visibility_types[vis];
37017 #endif
37020 /* This macro produces the initial definition of a function name.
37021 On the RS/6000, we need to place an extra '.' in the function name and
37022 output the function descriptor.
37023 Dollar signs are converted to underscores.
37025 The csect for the function will have already been created when
37026 text_section was selected. We do have to go back to that csect, however.
37028 The third and fourth parameters to the .function pseudo-op (16 and 044)
37029 are placeholders which no longer have any use.
37031 Because the AIX assembler's .set command has unexpected semantics, we output
37032 all aliases as alternative labels in front of the definition. */
37034 void
37035 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
37037 char *buffer = (char *) alloca (strlen (name) + 1);
37038 char *p;
37039 int dollar_inside = 0;
37040 struct declare_alias_data data = {file, false};
37042 strcpy (buffer, name);
37043 p = strchr (buffer, '$');
37044 while (p) {
37045 *p = '_';
37046 dollar_inside++;
37047 p = strchr (p + 1, '$');
37049 if (TREE_PUBLIC (decl))
37051 if (!RS6000_WEAK || !DECL_WEAK (decl))
37053 if (dollar_inside) {
37054 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
37055 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
37057 fputs ("\t.globl .", file);
37058 RS6000_OUTPUT_BASENAME (file, buffer);
37059 #ifdef HAVE_GAS_HIDDEN
37060 fputs (rs6000_xcoff_visibility (decl), file);
37061 #endif
37062 putc ('\n', file);
37065 else
37067 if (dollar_inside) {
37068 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
37069 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
37071 fputs ("\t.lglobl .", file);
37072 RS6000_OUTPUT_BASENAME (file, buffer);
37073 putc ('\n', file);
37075 fputs ("\t.csect ", file);
37076 RS6000_OUTPUT_BASENAME (file, buffer);
37077 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
37078 RS6000_OUTPUT_BASENAME (file, buffer);
37079 fputs (":\n", file);
37080 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37081 &data, true);
37082 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
37083 RS6000_OUTPUT_BASENAME (file, buffer);
37084 fputs (", TOC[tc0], 0\n", file);
37085 in_section = NULL;
37086 switch_to_section (function_section (decl));
37087 putc ('.', file);
37088 RS6000_OUTPUT_BASENAME (file, buffer);
37089 fputs (":\n", file);
37090 data.function_descriptor = true;
37091 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37092 &data, true);
37093 if (!DECL_IGNORED_P (decl))
37095 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
37096 xcoffout_declare_function (file, decl, buffer);
37097 else if (write_symbols == DWARF2_DEBUG)
37099 name = (*targetm.strip_name_encoding) (name);
37100 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
37103 return;
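/* (Editor's illustration.)  For a public 32-bit function "foo" the
   code above emits roughly

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   i.e. the [DS] csect holds the three-word descriptor (entry point,
   TOC anchor, environment) under the label "foo", while the code is
   emitted in the text csect under the dotted label ".foo".  */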
37107 /* Output assembly language to globalize a symbol from a DECL,
37108 possibly with visibility. */
37110 void
37111 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
37113 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
37114 fputs (GLOBAL_ASM_OP, stream);
37115 RS6000_OUTPUT_BASENAME (stream, name);
37116 #ifdef HAVE_GAS_HIDDEN
37117 fputs (rs6000_xcoff_visibility (decl), stream);
37118 #endif
37119 putc ('\n', stream);
37122 /* Output assembly language to define a symbol as COMMON from a DECL,
37123 possibly with visibility. */
37125 void
37126 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
37127 tree decl ATTRIBUTE_UNUSED,
37128 const char *name,
37129 unsigned HOST_WIDE_INT size,
37130 unsigned HOST_WIDE_INT align)
37132 unsigned HOST_WIDE_INT align2 = 2;
37134 if (align > 32)
37135 align2 = floor_log2 (align / BITS_PER_UNIT);
37136 else if (size > 4)
37137 align2 = 3;
37139 fputs (COMMON_ASM_OP, stream);
37140 RS6000_OUTPUT_BASENAME (stream, name);
37142 fprintf (stream,
37143 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
37144 size, align2);
37146 #ifdef HAVE_GAS_HIDDEN
37147 fputs (rs6000_xcoff_visibility (decl), stream);
37148 #endif
37149 putc ('\n', stream);
37152 /* This macro produces the initial definition of an object (variable) name.
37153 Because the AIX assembler's .set command has unexpected semantics, we output
37154 all aliases as alternative labels in front of the definition. */
37156 void
37157 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
37159 struct declare_alias_data data = {file, false};
37160 RS6000_OUTPUT_BASENAME (file, name);
37161 fputs (":\n", file);
37162 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
37163 &data, true);
37166 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
37168 void
37169 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
37171 fputs (integer_asm_op (size, FALSE), file);
37172 assemble_name (file, label);
37173 fputs ("-$", file);
37176 /* Output a symbol offset relative to the dbase for the current object.
37177 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
37178 signed offsets.
37180 __gcc_unwind_dbase is embedded in all executables/libraries through
37181 libgcc/config/rs6000/crtdbase.S. */
37183 void
37184 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
37186 fputs (integer_asm_op (size, FALSE), file);
37187 assemble_name (file, label);
37188 fputs("-__gcc_unwind_dbase", file);
37191 #ifdef HAVE_AS_TLS
37192 static void
37193 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
37195 rtx symbol;
37196 int flags;
37197 const char *symname;
37199 default_encode_section_info (decl, rtl, first);
37201 /* Careful not to prod global register variables. */
37202 if (!MEM_P (rtl))
37203 return;
37204 symbol = XEXP (rtl, 0);
37205 if (GET_CODE (symbol) != SYMBOL_REF)
37206 return;
37208 flags = SYMBOL_REF_FLAGS (symbol);
37210 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
37211 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
37213 SYMBOL_REF_FLAGS (symbol) = flags;
37215 /* Append mapping class to extern decls. */
37216 symname = XSTR (symbol, 0);
37217 if (decl /* sync condition with assemble_external () */
37218 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
37219 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
37220 || TREE_CODE (decl) == FUNCTION_DECL)
37221 && symname[strlen (symname) - 1] != ']')
37223 char *newname = (char *) alloca (strlen (symname) + 5);
37224 strcpy (newname, symname);
37225 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
37226 ? "[DS]" : "[UA]"));
37227 XSTR (symbol, 0) = ggc_strdup (newname);
37230 #endif /* HAVE_AS_TLS */
37231 #endif /* TARGET_XCOFF */
37233 void
37234 rs6000_asm_weaken_decl (FILE *stream, tree decl,
37235 const char *name, const char *val)
37237 fputs ("\t.weak\t", stream);
37238 RS6000_OUTPUT_BASENAME (stream, name);
37239 if (decl && TREE_CODE (decl) == FUNCTION_DECL
37240 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
37242 if (TARGET_XCOFF)
37243 fputs ("[DS]", stream);
37244 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
37245 if (TARGET_XCOFF)
37246 fputs (rs6000_xcoff_visibility (decl), stream);
37247 #endif
37248 fputs ("\n\t.weak\t.", stream);
37249 RS6000_OUTPUT_BASENAME (stream, name);
37251 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
37252 if (TARGET_XCOFF)
37253 fputs (rs6000_xcoff_visibility (decl), stream);
37254 #endif
37255 fputc ('\n', stream);
37256 if (val)
37258 #ifdef ASM_OUTPUT_DEF
37259 ASM_OUTPUT_DEF (stream, name, val);
37260 #endif
37261 if (decl && TREE_CODE (decl) == FUNCTION_DECL
37262 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
37264 fputs ("\t.set\t.", stream);
37265 RS6000_OUTPUT_BASENAME (stream, name);
37266 fputs (",.", stream);
37267 RS6000_OUTPUT_BASENAME (stream, val);
37268 fputc ('\n', stream);
37274 /* Return true if INSN should not be copied. */
37276 static bool
37277 rs6000_cannot_copy_insn_p (rtx_insn *insn)
37279 return recog_memoized (insn) >= 0
37280 && get_attr_cannot_copy (insn);
37283 /* Compute a (partial) cost for rtx X. Return true if the complete
37284 cost has been computed, and false if subexpressions should be
37285 scanned. In either case, *TOTAL contains the cost result. */
37287 static bool
37288 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
37289 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
37291 int code = GET_CODE (x);
37293 switch (code)
37295 /* On the RS/6000, if it is valid in the insn, it is free. */
37296 case CONST_INT:
37297 if (((outer_code == SET
37298 || outer_code == PLUS
37299 || outer_code == MINUS)
37300 && (satisfies_constraint_I (x)
37301 || satisfies_constraint_L (x)))
37302 || (outer_code == AND
37303 && (satisfies_constraint_K (x)
37304 || (mode == SImode
37305 ? satisfies_constraint_L (x)
37306 : satisfies_constraint_J (x))))
37307 || ((outer_code == IOR || outer_code == XOR)
37308 && (satisfies_constraint_K (x)
37309 || (mode == SImode
37310 ? satisfies_constraint_L (x)
37311 : satisfies_constraint_J (x))))
37312 || outer_code == ASHIFT
37313 || outer_code == ASHIFTRT
37314 || outer_code == LSHIFTRT
37315 || outer_code == ROTATE
37316 || outer_code == ROTATERT
37317 || outer_code == ZERO_EXTRACT
37318 || (outer_code == MULT
37319 && satisfies_constraint_I (x))
37320 || ((outer_code == DIV || outer_code == UDIV
37321 || outer_code == MOD || outer_code == UMOD)
37322 && exact_log2 (INTVAL (x)) >= 0)
37323 || (outer_code == COMPARE
37324 && (satisfies_constraint_I (x)
37325 || satisfies_constraint_K (x)))
37326 || ((outer_code == EQ || outer_code == NE)
37327 && (satisfies_constraint_I (x)
37328 || satisfies_constraint_K (x)
37329 || (mode == SImode
37330 ? satisfies_constraint_L (x)
37331 : satisfies_constraint_J (x))))
37332 || (outer_code == GTU
37333 && satisfies_constraint_I (x))
37334 || (outer_code == LTU
37335 && satisfies_constraint_P (x)))
37337 *total = 0;
37338 return true;
37340 else if ((outer_code == PLUS
37341 && reg_or_add_cint_operand (x, VOIDmode))
37342 || (outer_code == MINUS
37343 && reg_or_sub_cint_operand (x, VOIDmode))
37344 || ((outer_code == SET
37345 || outer_code == IOR
37346 || outer_code == XOR)
37347 && (INTVAL (x)
37348 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
37350 *total = COSTS_N_INSNS (1);
37351 return true;
37353 /* FALLTHRU */
37355 case CONST_DOUBLE:
37356 case CONST_WIDE_INT:
37357 case CONST:
37358 case HIGH:
37359 case SYMBOL_REF:
37360 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
37361 return true;
37363 case MEM:
37364 /* When optimizing for size, MEM should be slightly more expensive
37365 than generating an address, e.g., (plus (reg) (const)).
37366 L1 cache latency is about two instructions. */
37367 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
37368 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
37369 *total += COSTS_N_INSNS (100);
37370 return true;
37372 case LABEL_REF:
37373 *total = 0;
37374 return true;
37376 case PLUS:
37377 case MINUS:
37378 if (FLOAT_MODE_P (mode))
37379 *total = rs6000_cost->fp;
37380 else
37381 *total = COSTS_N_INSNS (1);
37382 return false;
37384 case MULT:
37385 if (GET_CODE (XEXP (x, 1)) == CONST_INT
37386 && satisfies_constraint_I (XEXP (x, 1)))
37388 if (INTVAL (XEXP (x, 1)) >= -256
37389 && INTVAL (XEXP (x, 1)) <= 255)
37390 *total = rs6000_cost->mulsi_const9;
37391 else
37392 *total = rs6000_cost->mulsi_const;
37394 else if (mode == SFmode)
37395 *total = rs6000_cost->fp;
37396 else if (FLOAT_MODE_P (mode))
37397 *total = rs6000_cost->dmul;
37398 else if (mode == DImode)
37399 *total = rs6000_cost->muldi;
37400 else
37401 *total = rs6000_cost->mulsi;
37402 return false;
37404 case FMA:
37405 if (mode == SFmode)
37406 *total = rs6000_cost->fp;
37407 else
37408 *total = rs6000_cost->dmul;
37409 break;
37411 case DIV:
37412 case MOD:
37413 if (FLOAT_MODE_P (mode))
37415 *total = mode == DFmode ? rs6000_cost->ddiv
37416 : rs6000_cost->sdiv;
37417 return false;
37419 /* FALLTHRU */
37421 case UDIV:
37422 case UMOD:
37423 if (GET_CODE (XEXP (x, 1)) == CONST_INT
37424 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
37426 if (code == DIV || code == MOD)
37427 /* Shift, addze */
37428 *total = COSTS_N_INSNS (2);
37429 else
37430 /* Shift */
37431 *total = COSTS_N_INSNS (1);
37433 else
37435 if (GET_MODE (XEXP (x, 1)) == DImode)
37436 *total = rs6000_cost->divdi;
37437 else
37438 *total = rs6000_cost->divsi;
37440 /* Add in shift and subtract for MOD unless we have a mod instruction. */
37441 if (!TARGET_MODULO && (code == MOD || code == UMOD))
37442 *total += COSTS_N_INSNS (2);
37443 return false;
37445 case CTZ:
37446 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
37447 return false;
37449 case FFS:
37450 *total = COSTS_N_INSNS (4);
37451 return false;
37453 case POPCOUNT:
37454 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
37455 return false;
37457 case PARITY:
37458 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
37459 return false;
37461 case NOT:
37462 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
37463 *total = 0;
37464 else
37465 *total = COSTS_N_INSNS (1);
37466 return false;
37468 case AND:
37469 if (CONST_INT_P (XEXP (x, 1)))
37471 rtx left = XEXP (x, 0);
37472 rtx_code left_code = GET_CODE (left);
37474 /* rotate-and-mask: 1 insn. */
37475 if ((left_code == ROTATE
37476 || left_code == ASHIFT
37477 || left_code == LSHIFTRT)
37478 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
37480 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
37481 if (!CONST_INT_P (XEXP (left, 1)))
37482 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
37483 *total += COSTS_N_INSNS (1);
37484 return true;
37487 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
37488 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
37489 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
37490 || (val & 0xffff) == val
37491 || (val & 0xffff0000) == val
37492 || ((val & 0xffff) == 0 && mode == SImode))
37494 *total = rtx_cost (left, mode, AND, 0, speed);
37495 *total += COSTS_N_INSNS (1);
37496 return true;
37499 /* 2 insns. */
37500 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
37502 *total = rtx_cost (left, mode, AND, 0, speed);
37503 *total += COSTS_N_INSNS (2);
37504 return true;
37508 *total = COSTS_N_INSNS (1);
37509 return false;
37511 case IOR:
37512 /* FIXME */
37513 *total = COSTS_N_INSNS (1);
37514 return true;
37516 case CLZ:
37517 case XOR:
37518 case ZERO_EXTRACT:
37519 *total = COSTS_N_INSNS (1);
37520 return false;
37522 case ASHIFT:
37523 /* The EXTSWSLI instruction is a combined instruction. Don't count
37524 the sign extend and the shift separately within the insn. */
37525 if (TARGET_EXTSWSLI && mode == DImode
37526 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
37527 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
37529 *total = 0;
37530 return false;
37532 /* fall through */
37534 case ASHIFTRT:
37535 case LSHIFTRT:
37536 case ROTATE:
37537 case ROTATERT:
37538 /* Handle mul_highpart. */
37539 if (outer_code == TRUNCATE
37540 && GET_CODE (XEXP (x, 0)) == MULT)
37542 if (mode == DImode)
37543 *total = rs6000_cost->muldi;
37544 else
37545 *total = rs6000_cost->mulsi;
37546 return true;
37548 else if (outer_code == AND)
37549 *total = 0;
37550 else
37551 *total = COSTS_N_INSNS (1);
37552 return false;
37554 case SIGN_EXTEND:
37555 case ZERO_EXTEND:
37556 if (GET_CODE (XEXP (x, 0)) == MEM)
37557 *total = 0;
37558 else
37559 *total = COSTS_N_INSNS (1);
37560 return false;
37562 case COMPARE:
37563 case NEG:
37564 case ABS:
37565 if (!FLOAT_MODE_P (mode))
37567 *total = COSTS_N_INSNS (1);
37568 return false;
37570 /* FALLTHRU */
37572 case FLOAT:
37573 case UNSIGNED_FLOAT:
37574 case FIX:
37575 case UNSIGNED_FIX:
37576 case FLOAT_TRUNCATE:
37577 *total = rs6000_cost->fp;
37578 return false;
37580 case FLOAT_EXTEND:
37581 if (mode == DFmode)
37582 *total = rs6000_cost->sfdf_convert;
37583 else
37584 *total = rs6000_cost->fp;
37585 return false;
37587 case UNSPEC:
37588 switch (XINT (x, 1))
37590 case UNSPEC_FRSP:
37591 *total = rs6000_cost->fp;
37592 return true;
37594 default:
37595 break;
37597 break;
37599 case CALL:
37600 case IF_THEN_ELSE:
37601 if (!speed)
37603 *total = COSTS_N_INSNS (1);
37604 return true;
37606 else if (FLOAT_MODE_P (mode)
37607 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
37609 *total = rs6000_cost->fp;
37610 return false;
37612 break;
37614 case NE:
37615 case EQ:
37616 case GTU:
37617 case LTU:
37618 /* Carry bit requires mode == Pmode.
37619 NEG or PLUS is already counted, so only add one. */
37620 if (mode == Pmode
37621 && (outer_code == NEG || outer_code == PLUS))
37623 *total = COSTS_N_INSNS (1);
37624 return true;
37626 if (outer_code == SET)
37628 if (XEXP (x, 1) == const0_rtx)
37630 if (TARGET_ISEL && !TARGET_MFCRF)
37631 *total = COSTS_N_INSNS (8);
37632 else
37633 *total = COSTS_N_INSNS (2);
37634 return true;
37636 else
37638 *total = COSTS_N_INSNS (3);
37639 return false;
37642 /* FALLTHRU */
37644 case GT:
37645 case LT:
37646 case UNORDERED:
37647 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
37649 if (TARGET_ISEL && !TARGET_MFCRF)
37650 *total = COSTS_N_INSNS (8);
37651 else
37652 *total = COSTS_N_INSNS (2);
37653 return true;
37655 /* CC COMPARE. */
37656 if (outer_code == COMPARE)
37658 *total = 0;
37659 return true;
37661 break;
37663 default:
37664 break;
37667 return false;
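/* Worked example (editor's note): costing (plus (reg) (const_int 8))
   in SImode, the CONST_INT is reached with outer_code == PLUS and
   satisfies constraint I (a signed 16-bit value), so it contributes
   a cost of 0, and the PLUS itself is COSTS_N_INSNS (1) -- one addi.  */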
37670 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
37672 static bool
37673 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
37674 int opno, int *total, bool speed)
37676 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
37678 fprintf (stderr,
37679 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
37680 "opno = %d, total = %d, speed = %s, x:\n",
37681 ret ? "complete" : "scan inner",
37682 GET_MODE_NAME (mode),
37683 GET_RTX_NAME (outer_code),
37684 opno,
37685 *total,
37686 speed ? "true" : "false");
37688 debug_rtx (x);
37690 return ret;
37693 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
37695 static int
37696 rs6000_debug_address_cost (rtx x, machine_mode mode,
37697 addr_space_t as, bool speed)
37699 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
37701 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
37702 ret, speed ? "true" : "false");
37703 debug_rtx (x);
37705 return ret;
37709 /* A C expression returning the cost of moving data from a register of class
37710 FROM to one of class TO. */
37712 static int
37713 rs6000_register_move_cost (machine_mode mode,
37714 reg_class_t from, reg_class_t to)
37716 int ret;
37718 if (TARGET_DEBUG_COST)
37719 dbg_cost_ctrl++;
37721 /* Moves from/to GENERAL_REGS. */
37722 if (reg_classes_intersect_p (to, GENERAL_REGS)
37723 || reg_classes_intersect_p (from, GENERAL_REGS))
37725 reg_class_t rclass = from;
37727 if (! reg_classes_intersect_p (to, GENERAL_REGS))
37728 rclass = to;
37730 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
37731 ret = (rs6000_memory_move_cost (mode, rclass, false)
37732 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
37734 /* It's more expensive to move CR_REGS than CR0_REGS because of the
37735 shift. */
37736 else if (rclass == CR_REGS)
37737 ret = 4;
37739 /* For those processors that have slow LR/CTR moves, make them more
37740 expensive than memory in order to bias spills to memory. */
37741 else if ((rs6000_cpu == PROCESSOR_POWER6
37742 || rs6000_cpu == PROCESSOR_POWER7
37743 || rs6000_cpu == PROCESSOR_POWER8
37744 || rs6000_cpu == PROCESSOR_POWER9)
37745 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
37746 ret = 6 * hard_regno_nregs[0][mode];
37748 else
37749 /* A move will cost one instruction per GPR moved. */
37750 ret = 2 * hard_regno_nregs[0][mode];
37753 /* If we have VSX, we can easily move between FPR or Altivec registers. */
37754 else if (VECTOR_MEM_VSX_P (mode)
37755 && reg_classes_intersect_p (to, VSX_REGS)
37756 && reg_classes_intersect_p (from, VSX_REGS))
37757 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
37759 /* Moving between two similar registers is just one instruction. */
37760 else if (reg_classes_intersect_p (to, from))
37761 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
37763 /* Everything else has to go through GENERAL_REGS. */
37764 else
37765 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
37766 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
37768 if (TARGET_DEBUG_COST)
37770 if (dbg_cost_ctrl == 1)
37771 fprintf (stderr,
37772 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
37773 ret, GET_MODE_NAME (mode), reg_class_names[from],
37774 reg_class_names[to]);
37775 dbg_cost_ctrl--;
37778 return ret;
37781 /* A C expression returning the cost of moving data of mode MODE between
37782 a register of class RCLASS and memory. */
37784 static int
37785 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
37786 bool in ATTRIBUTE_UNUSED)
37788 int ret;
37790 if (TARGET_DEBUG_COST)
37791 dbg_cost_ctrl++;
37793 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
37794 ret = 4 * hard_regno_nregs[0][mode];
37795 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
37796 || reg_classes_intersect_p (rclass, VSX_REGS)))
37797 ret = 4 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
37798 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
37799 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
37800 else
37801 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
37803 if (TARGET_DEBUG_COST)
37805 if (dbg_cost_ctrl == 1)
37806 fprintf (stderr,
37807 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
37808 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
37809 dbg_cost_ctrl--;
37812 return ret;
37815 /* Return the decl of a target-specific builtin that implements the
37816 reciprocal of the function FNDECL, or NULL_TREE if not available. */
37818 static tree
37819 rs6000_builtin_reciprocal (tree fndecl)
37821 switch (DECL_FUNCTION_CODE (fndecl))
37823 case VSX_BUILTIN_XVSQRTDP:
37824 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
37825 return NULL_TREE;
37827 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
37829 case VSX_BUILTIN_XVSQRTSP:
37830 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
37831 return NULL_TREE;
37833 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
37835 default:
37836 return NULL_TREE;
37840 /* Load up a constant. If the mode is a vector mode, splat the value across
37841 all of the vector elements. */
37843 static rtx
37844 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
37846 rtx reg;
37848 if (mode == SFmode || mode == DFmode)
37850 rtx d = const_double_from_real_value (dconst, mode);
37851 reg = force_reg (mode, d);
37853 else if (mode == V4SFmode)
37855 rtx d = const_double_from_real_value (dconst, SFmode);
37856 rtvec v = gen_rtvec (4, d, d, d, d);
37857 reg = gen_reg_rtx (mode);
37858 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37860 else if (mode == V2DFmode)
37862 rtx d = const_double_from_real_value (dconst, DFmode);
37863 rtvec v = gen_rtvec (2, d, d);
37864 reg = gen_reg_rtx (mode);
37865 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37867 else
37868 gcc_unreachable ();
37870 return reg;
37873 /* Generate an FMA instruction. */
37875 static void
37876 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
37878 machine_mode mode = GET_MODE (target);
37879 rtx dst;
37881 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
37882 gcc_assert (dst != NULL);
37884 if (dst != target)
37885 emit_move_insn (target, dst);
37888 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
37890 static void
37891 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
37893 machine_mode mode = GET_MODE (dst);
37894 rtx r;
37896 /* This is a tad more complicated, since the fnma_optab is for
37897 a different expression: fma(-m1, m2, a), which is the same
37898 thing except in the case of signed zeros.
37900 Fortunately we know that if FMA is supported, then FNMSUB is
37901 also supported in the ISA. Just expand it directly. */
37903 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
37905 r = gen_rtx_NEG (mode, a);
37906 r = gen_rtx_FMA (mode, m1, m2, r);
37907 r = gen_rtx_NEG (mode, r);
37908 emit_insn (gen_rtx_SET (dst, r));
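/* Editor's note on the signed-zero caveat above: with m1*m2 == +0.0
   and a == +0.0, -fma (m1, m2, -a) = -(+0.0 + -0.0) = -0.0, whereas
   fma (-m1, m2, a) = -0.0 + +0.0 = +0.0 under round-to-nearest,
   which is why fnma_optab cannot be used directly here.  */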
37911 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
37912 add a reg_note saying that this was a division. Support both scalar and
37913 vector divide. Assumes no trapping math and finite arguments. */
37915 void
37916 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
37918 machine_mode mode = GET_MODE (dst);
37919 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
37920 int i;
37922 /* Low precision estimates guarantee 5 bits of accuracy. High
37923 precision estimates guarantee 14 bits of accuracy. SFmode
37924 requires 23 bits of accuracy. DFmode requires 52 bits of
37925 accuracy. Each pass at least doubles the accuracy, leading
37926 to the following. */
37927 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
37928 if (mode == DFmode || mode == V2DFmode)
37929 passes++;
37931 enum insn_code code = optab_handler (smul_optab, mode);
37932 insn_gen_fn gen_mul = GEN_FCN (code);
37934 gcc_assert (code != CODE_FOR_nothing);
37936 one = rs6000_load_constant_and_splat (mode, dconst1);
37938 /* x0 = 1./d estimate */
37939 x0 = gen_reg_rtx (mode);
37940 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
37941 UNSPEC_FRES)));
37943 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
37944 if (passes > 1) {
37946 /* e0 = 1. - d * x0 */
37947 e0 = gen_reg_rtx (mode);
37948 rs6000_emit_nmsub (e0, d, x0, one);
37950 /* x1 = x0 + e0 * x0 */
37951 x1 = gen_reg_rtx (mode);
37952 rs6000_emit_madd (x1, e0, x0, x0);
37954 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
37955 ++i, xprev = xnext, eprev = enext) {
37957 /* enext = eprev * eprev */
37958 enext = gen_reg_rtx (mode);
37959 emit_insn (gen_mul (enext, eprev, eprev));
37961 /* xnext = xprev + enext * xprev */
37962 xnext = gen_reg_rtx (mode);
37963 rs6000_emit_madd (xnext, enext, xprev, xprev);
37966 } else
37967 xprev = x0;
37969 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
37971 /* u = n * xprev */
37972 u = gen_reg_rtx (mode);
37973 emit_insn (gen_mul (u, n, xprev));
37975 /* v = n - (d * u) */
37976 v = gen_reg_rtx (mode);
37977 rs6000_emit_nmsub (v, d, u, n);
37979 /* dst = (v * xprev) + u */
37980 rs6000_emit_madd (dst, v, xprev, u);
37982 if (note_p)
37983 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
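/* A plain-C scalar model of the sequence emitted above (an editor's
   sketch; the compiler squares the tracked error term instead of
   recomputing 1 - d*x each time, which is algebraically identical):  */

static double swdiv_model (double, double, double, int) ATTRIBUTE_UNUSED;
static double
swdiv_model (double n, double d, double x0, int passes)
{
  double x = x0;		/* initial 1/d estimate, as from fres.  */
  for (int i = 0; i < passes - 1; i++)
    x = x + (1.0 - d * x) * x;	/* x *= (2 - d*x): Newton-Raphson step.  */
  double u = n * x;		/* u = n * xprev.  */
  return u + (n - d * u) * x;	/* dst = (v * xprev) + u.  */
}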
37986 /* Goldschmidt's Algorithm for single/double-precision floating point
37987 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
37989 void
37990 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
37992 machine_mode mode = GET_MODE (src);
37993 rtx e = gen_reg_rtx (mode);
37994 rtx g = gen_reg_rtx (mode);
37995 rtx h = gen_reg_rtx (mode);
37997 /* Low precision estimates guarantee 5 bits of accuracy. High
37998 precision estimates guarantee 14 bits of accuracy. SFmode
37999 requires 23 bits of accuracy. DFmode requires 52 bits of
38000 accuracy. Each pass at least doubles the accuracy, leading
38001 to the following. */
38002 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
38003 if (mode == DFmode || mode == V2DFmode)
38004 passes++;
38006 int i;
38007 rtx mhalf;
38008 enum insn_code code = optab_handler (smul_optab, mode);
38009 insn_gen_fn gen_mul = GEN_FCN (code);
38011 gcc_assert (code != CODE_FOR_nothing);
38013 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
38015 /* e = rsqrt estimate */
38016 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
38017 UNSPEC_RSQRT)));
38019 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
38020 if (!recip)
38022 rtx zero = force_reg (mode, CONST0_RTX (mode));
38024 if (mode == SFmode)
38026 rtx target = emit_conditional_move (e, GT, src, zero, mode,
38027 e, zero, mode, 0);
38028 if (target != e)
38029 emit_move_insn (e, target);
38031 else
38033 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
38034 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
38038 /* g = sqrt estimate. */
38039 emit_insn (gen_mul (g, e, src));
38040 /* h = 1/(2*sqrt) estimate. */
38041 emit_insn (gen_mul (h, e, mhalf));
38043 if (recip)
38045 if (passes == 1)
38047 rtx t = gen_reg_rtx (mode);
38048 rs6000_emit_nmsub (t, g, h, mhalf);
38049 /* Apply the correction directly to the rsqrt estimate. */
38050 rs6000_emit_madd (dst, e, t, e);
38052 else
38054 for (i = 0; i < passes; i++)
38056 rtx t1 = gen_reg_rtx (mode);
38057 rtx g1 = gen_reg_rtx (mode);
38058 rtx h1 = gen_reg_rtx (mode);
38060 rs6000_emit_nmsub (t1, g, h, mhalf);
38061 rs6000_emit_madd (g1, g, t1, g);
38062 rs6000_emit_madd (h1, h, t1, h);
38064 g = g1;
38065 h = h1;
38067 /* Multiply by 2, since 2*h approximates 1/sqrt, the rsqrt result. */
38068 emit_insn (gen_add3_insn (dst, h, h));
38071 else
38073 rtx t = gen_reg_rtx (mode);
38074 rs6000_emit_nmsub (t, g, h, mhalf);
38075 rs6000_emit_madd (dst, g, t, g);
38078 return;
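/* A matching plain-C model of the Goldschmidt iteration above
   (editor's sketch; the recip && passes == 1 special case folds the
   final doubling into E, which is algebraically the same thing):  */

static double swsqrt_model (double, double, int, bool) ATTRIBUTE_UNUSED;
static double
swsqrt_model (double src, double e, int passes, bool recip)
{
  double g = e * src;		/* g = sqrt estimate.  */
  double h = e * 0.5;		/* h = 1/(2*sqrt) estimate.  */
  if (recip)
    {
      for (int i = 0; i < passes; i++)
	{
	  double t = 0.5 - g * h;	/* residual, driven toward zero.  */
	  g = g + g * t;
	  h = h + h * t;
	}
      return h + h;		/* 2*h approximates 1/sqrt (src).  */
    }
  double t = 0.5 - g * h;
  return g + g * t;		/* single coupled correction of g.  */
}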
38081 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
38082 (Power7) targets. DST is the target, and SRC is the argument operand. */
38084 void
38085 rs6000_emit_popcount (rtx dst, rtx src)
38087 machine_mode mode = GET_MODE (dst);
38088 rtx tmp1, tmp2;
38090 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
38091 if (TARGET_POPCNTD)
38093 if (mode == SImode)
38094 emit_insn (gen_popcntdsi2 (dst, src));
38095 else
38096 emit_insn (gen_popcntddi2 (dst, src));
38097 return;
38100 tmp1 = gen_reg_rtx (mode);
38102 if (mode == SImode)
38104 emit_insn (gen_popcntbsi2 (tmp1, src));
38105 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
38106 NULL_RTX, 0);
38107 tmp2 = force_reg (SImode, tmp2);
38108 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
38110 else
38112 emit_insn (gen_popcntbdi2 (tmp1, src));
38113 tmp2 = expand_mult (DImode, tmp1,
38114 GEN_INT ((HOST_WIDE_INT)
38115 0x01010101 << 32 | 0x01010101),
38116 NULL_RTX, 0);
38117 tmp2 = force_reg (DImode, tmp2);
38118 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
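/* A plain-C model of the popcntb-plus-multiply fallback above
   (editor's sketch): once every byte holds its own population count,
   multiplying by 0x01010101 accumulates all the byte counts into the
   most significant byte, which the final shift extracts.  */

static unsigned int popcount_model (unsigned int) ATTRIBUTE_UNUSED;
static unsigned int
popcount_model (unsigned int x)
{
  /* SWAR per-byte population count, standing in for popcntb.  */
  x = x - ((x >> 1) & 0x55555555);
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x = (x + (x >> 4)) & 0x0f0f0f0f;
  /* Sum the four byte counts into bits 24-31, then extract them.  */
  return (x * 0x01010101u) >> 24;
}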
38123 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
38124 target, and SRC is the argument operand. */
38126 void
38127 rs6000_emit_parity (rtx dst, rtx src)
38129 machine_mode mode = GET_MODE (dst);
38130 rtx tmp;
38132 tmp = gen_reg_rtx (mode);
38134 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
38135 if (TARGET_CMPB)
38137 if (mode == SImode)
38139 emit_insn (gen_popcntbsi2 (tmp, src));
38140 emit_insn (gen_paritysi2_cmpb (dst, tmp));
38142 else
38144 emit_insn (gen_popcntbdi2 (tmp, src));
38145 emit_insn (gen_paritydi2_cmpb (dst, tmp));
38147 return;
38150 if (mode == SImode)
38152 /* Is mult+shift >= shift+xor+shift+xor? */
38153 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
38155 rtx tmp1, tmp2, tmp3, tmp4;
38157 tmp1 = gen_reg_rtx (SImode);
38158 emit_insn (gen_popcntbsi2 (tmp1, src));
38160 tmp2 = gen_reg_rtx (SImode);
38161 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
38162 tmp3 = gen_reg_rtx (SImode);
38163 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
38165 tmp4 = gen_reg_rtx (SImode);
38166 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
38167 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
38169 else
38170 rs6000_emit_popcount (tmp, src);
38171 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
38173 else
38175 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
38176 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
38178 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
38180 tmp1 = gen_reg_rtx (DImode);
38181 emit_insn (gen_popcntbdi2 (tmp1, src));
38183 tmp2 = gen_reg_rtx (DImode);
38184 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
38185 tmp3 = gen_reg_rtx (DImode);
38186 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
38188 tmp4 = gen_reg_rtx (DImode);
38189 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
38190 tmp5 = gen_reg_rtx (DImode);
38191 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
38193 tmp6 = gen_reg_rtx (DImode);
38194 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
38195 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
38197 else
38198 rs6000_emit_popcount (tmp, src);
38199 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
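/* The corresponding plain-C model of the shift+xor parity fallback
   above (editor's sketch): xor-folding halves of the per-byte counts
   adds them modulo 2, so the low bit ends up holding the parity.  */

static unsigned int parity_model (unsigned int) ATTRIBUTE_UNUSED;
static unsigned int
parity_model (unsigned int x)
{
  /* Per-byte population counts, as popcntb would produce.  */
  x = x - ((x >> 1) & 0x55555555);
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x = (x + (x >> 4)) & 0x0f0f0f0f;
  /* Fold the halves together; only bit 0 matters in the end.  */
  x ^= x >> 16;
  x ^= x >> 8;
  return x & 1;
}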
38203 /* Expand an Altivec constant permutation for little endian mode.
38204 There are two issues: First, the two input operands must be
38205 swapped so that together they form a double-wide array in LE
38206 order. Second, the vperm instruction has surprising behavior
38207 in LE mode: it interprets the elements of the source vectors
38208 in BE mode ("left to right") and interprets the elements of
38209 the destination vector in LE mode ("right to left"). To
38210 correct for this, we must subtract each element of the permute
38211 control vector from 31.
38213 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
38214 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
38215 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
38216 serve as the permute control vector. Then, in BE mode,
38218 vperm 9,10,11,12
38220 places the desired result in vr9. However, in LE mode the
38221 vector contents will be
38223 vr10 = 00000003 00000002 00000001 00000000
38224 vr11 = 00000007 00000006 00000005 00000004
38226 The result of the vperm using the same permute control vector is
38228 vr9 = 05000000 07000000 01000000 03000000
38230 That is, the leftmost 4 bytes of vr10 are interpreted as the
38231 source for the rightmost 4 bytes of vr9, and so on.
38233 If we change the permute control vector to
38235 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
38237 and issue
38239 vperm 9,11,10,12
38241 we get the desired
38243 vr9 = 00000006 00000004 00000002 00000000. */
38245 void
38246 altivec_expand_vec_perm_const_le (rtx operands[4])
38248 unsigned int i;
38249 rtx perm[16];
38250 rtx constv, unspec;
38251 rtx target = operands[0];
38252 rtx op0 = operands[1];
38253 rtx op1 = operands[2];
38254 rtx sel = operands[3];
38256 /* Unpack and adjust the constant selector. */
38257 for (i = 0; i < 16; ++i)
38259 rtx e = XVECEXP (sel, 0, i);
38260 unsigned int elt = 31 - (INTVAL (e) & 31);
38261 perm[i] = GEN_INT (elt);
38264 /* Expand to a permute, swapping the inputs and using the
38265 adjusted selector. */
38266 if (!REG_P (op0))
38267 op0 = force_reg (V16QImode, op0);
38268 if (!REG_P (op1))
38269 op1 = force_reg (V16QImode, op1);
38271 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
38272 constv = force_reg (V16QImode, constv);
38273 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
38274 UNSPEC_VPERM);
38275 if (!REG_P (target))
38277 rtx tmp = gen_reg_rtx (V16QImode);
38278 emit_move_insn (tmp, unspec);
38279 unspec = tmp;
38282 emit_move_insn (target, unspec);
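/* A plain-C model of the selector adjustment above (editor's
   sketch): each element is mirrored through 31, renumbering the
   BE-interpreted vperm lanes into LE order; swapping the two input
   operands in the caller supplies the other half of the fix.  */

static void adjust_vperm_sel_le (unsigned char *) ATTRIBUTE_UNUSED;
static void
adjust_vperm_sel_le (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}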
38285 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
38286 permute control vector. But here it's not a constant, so we must
38287 generate a vector NAND or NOR to do the adjustment. */
38289 void
38290 altivec_expand_vec_perm_le (rtx operands[4])
38292 rtx notx, iorx, unspec;
38293 rtx target = operands[0];
38294 rtx op0 = operands[1];
38295 rtx op1 = operands[2];
38296 rtx sel = operands[3];
38297 rtx tmp = target;
38298 rtx norreg = gen_reg_rtx (V16QImode);
38299 machine_mode mode = GET_MODE (target);
38301 /* Get everything in regs so the pattern matches. */
38302 if (!REG_P (op0))
38303 op0 = force_reg (mode, op0);
38304 if (!REG_P (op1))
38305 op1 = force_reg (mode, op1);
38306 if (!REG_P (sel))
38307 sel = force_reg (V16QImode, sel);
38308 if (!REG_P (target))
38309 tmp = gen_reg_rtx (mode);
38311 if (TARGET_P9_VECTOR)
38313 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
38314 UNSPEC_VPERMR);
38316 else
38318 /* Invert the selector with a VNAND if available, else a VNOR.
38319 The VNAND is preferred for future fusion opportunities. */
38320 notx = gen_rtx_NOT (V16QImode, sel);
38321 iorx = (TARGET_P8_VECTOR
38322 ? gen_rtx_IOR (V16QImode, notx, notx)
38323 : gen_rtx_AND (V16QImode, notx, notx));
38324 emit_insn (gen_rtx_SET (norreg, iorx));
38326 /* Permute with operands reversed and adjusted selector. */
38327 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
38328 UNSPEC_VPERM);
38331 /* Copy into target, possibly by way of a register. */
38332 if (!REG_P (target))
38334 emit_move_insn (tmp, unspec);
38335 unspec = tmp;
38338 emit_move_insn (target, unspec);
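/* Editor's note on the inversion above: with both inputs tied to the
   same value, vnand and vnor each compute ~sel, since
   ~(x & x) == ~(x | x) == ~x; the VNAND form is simply the one
   preferred on Power8 for its fusion opportunities.  */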
38341 /* Expand an Altivec constant permutation. Return true if we match
38342 an efficient implementation; false to fall back to VPERM. */
38344 bool
38345 altivec_expand_vec_perm_const (rtx operands[4])
38347 struct altivec_perm_insn {
38348 HOST_WIDE_INT mask;
38349 enum insn_code impl;
38350 unsigned char perm[16];
38352 static const struct altivec_perm_insn patterns[] = {
38353 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
38354 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
38355 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
38356 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
38357 { OPTION_MASK_ALTIVEC,
38358 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
38359 : CODE_FOR_altivec_vmrglb_direct),
38360 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
38361 { OPTION_MASK_ALTIVEC,
38362 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
38363 : CODE_FOR_altivec_vmrglh_direct),
38364 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
38365 { OPTION_MASK_ALTIVEC,
38366 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
38367 : CODE_FOR_altivec_vmrglw_direct),
38368 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
38369 { OPTION_MASK_ALTIVEC,
38370 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
38371 : CODE_FOR_altivec_vmrghb_direct),
38372 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
38373 { OPTION_MASK_ALTIVEC,
38374 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
38375 : CODE_FOR_altivec_vmrghh_direct),
38376 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
38377 { OPTION_MASK_ALTIVEC,
38378 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
38379 : CODE_FOR_altivec_vmrghw_direct),
38380 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
38381 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
38382 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
38383 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
38384 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
38387 unsigned int i, j, elt, which;
38388 unsigned char perm[16];
38389 rtx target, op0, op1, sel, x;
38390 bool one_vec;
38392 target = operands[0];
38393 op0 = operands[1];
38394 op1 = operands[2];
38395 sel = operands[3];
38397 /* Unpack the constant selector. */
38398 for (i = which = 0; i < 16; ++i)
38400 rtx e = XVECEXP (sel, 0, i);
38401 elt = INTVAL (e) & 31;
38402 which |= (elt < 16 ? 1 : 2);
38403 perm[i] = elt;
38406 /* Simplify the constant selector based on operands. */
38407 switch (which)
38409 default:
38410 gcc_unreachable ();
38412 case 3:
38413 one_vec = false;
38414 if (!rtx_equal_p (op0, op1))
38415 break;
38416 /* FALLTHRU */
38418 case 2:
38419 for (i = 0; i < 16; ++i)
38420 perm[i] &= 15;
38421 op0 = op1;
38422 one_vec = true;
38423 break;
38425 case 1:
38426 op1 = op0;
38427 one_vec = true;
38428 break;
38431 /* Look for splat patterns. */
38432 if (one_vec)
38434 elt = perm[0];
38436 for (i = 0; i < 16; ++i)
38437 if (perm[i] != elt)
38438 break;
38439 if (i == 16)
38441 if (!BYTES_BIG_ENDIAN)
38442 elt = 15 - elt;
38443 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
38444 return true;
38447 if (elt % 2 == 0)
38449 for (i = 0; i < 16; i += 2)
38450 if (perm[i] != elt || perm[i + 1] != elt + 1)
38451 break;
38452 if (i == 16)
38454 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
38455 x = gen_reg_rtx (V8HImode);
38456 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
38457 GEN_INT (field)));
38458 emit_move_insn (target, gen_lowpart (V16QImode, x));
38459 return true;
38463 if (elt % 4 == 0)
38465 for (i = 0; i < 16; i += 4)
38466 if (perm[i] != elt
38467 || perm[i + 1] != elt + 1
38468 || perm[i + 2] != elt + 2
38469 || perm[i + 3] != elt + 3)
38470 break;
38471 if (i == 16)
38473 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
38474 x = gen_reg_rtx (V4SImode);
38475 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
38476 GEN_INT (field)));
38477 emit_move_insn (target, gen_lowpart (V16QImode, x));
38478 return true;
38483 /* Look for merge and pack patterns. */
38484 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
38486 bool swapped;
38488 if ((patterns[j].mask & rs6000_isa_flags) == 0)
38489 continue;
38491 elt = patterns[j].perm[0];
38492 if (perm[0] == elt)
38493 swapped = false;
38494 else if (perm[0] == elt + 16)
38495 swapped = true;
38496 else
38497 continue;
38498 for (i = 1; i < 16; ++i)
38500 elt = patterns[j].perm[i];
38501 if (swapped)
38502 elt = (elt >= 16 ? elt - 16 : elt + 16);
38503 else if (one_vec && elt >= 16)
38504 elt -= 16;
38505 if (perm[i] != elt)
38506 break;
38508 if (i == 16)
38510 enum insn_code icode = patterns[j].impl;
38511 machine_mode omode = insn_data[icode].operand[0].mode;
38512 machine_mode imode = insn_data[icode].operand[1].mode;
38514 /* For little-endian, don't use vpkuwum and vpkuhum if the
38515 underlying vector type is not V4SI and V8HI, respectively.
38516 For example, using vpkuwum with a V8HI picks up the even
38517 halfwords (BE numbering) when the even halfwords (LE
38518 numbering) are what we need. */
38519 if (!BYTES_BIG_ENDIAN
38520 && icode == CODE_FOR_altivec_vpkuwum_direct
38521 && ((GET_CODE (op0) == REG
38522 && GET_MODE (op0) != V4SImode)
38523 || (GET_CODE (op0) == SUBREG
38524 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
38525 continue;
38526 if (!BYTES_BIG_ENDIAN
38527 && icode == CODE_FOR_altivec_vpkuhum_direct
38528 && ((GET_CODE (op0) == REG
38529 && GET_MODE (op0) != V8HImode)
38530 || (GET_CODE (op0) == SUBREG
38531 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
38532 continue;
38534 /* For little-endian, the two input operands must be swapped
38535 (or swapped back) to ensure proper right-to-left numbering
38536 from 0 to 2N-1. */
38537 if (swapped ^ !BYTES_BIG_ENDIAN)
38538 std::swap (op0, op1);
38539 if (imode != V16QImode)
38541 op0 = gen_lowpart (imode, op0);
38542 op1 = gen_lowpart (imode, op1);
38544 if (omode == V16QImode)
38545 x = target;
38546 else
38547 x = gen_reg_rtx (omode);
38548 emit_insn (GEN_FCN (icode) (x, op0, op1));
38549 if (omode != V16QImode)
38550 emit_move_insn (target, gen_lowpart (V16QImode, x));
38551 return true;
38555 if (!BYTES_BIG_ENDIAN)
38557 altivec_expand_vec_perm_const_le (operands);
38558 return true;
38561 return false;
38564 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
38565 Return true if we match an efficient implementation. */
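/* PERM0 and PERM1 are two-bit selectors into the four-element
   concatenation {op0[0], op0[1], op1[0], op1[1]}; e.g. perm0 = 0 and
   perm1 = 3 selects op0[0] and op1[1].  */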
38567 static bool
38568 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
38569 unsigned char perm0, unsigned char perm1)
38571 rtx x;
38573 /* If both selectors come from the same operand, fold to single op. */
38574 if ((perm0 & 2) == (perm1 & 2))
38576 if (perm0 & 2)
38577 op0 = op1;
38578 else
38579 op1 = op0;
38581 /* If both operands are equal, fold to simpler permutation. */
38582 if (rtx_equal_p (op0, op1))
38584 perm0 = perm0 & 1;
38585 perm1 = (perm1 & 1) + 2;
38587 /* If the first selector comes from the second operand, swap. */
38588 else if (perm0 & 2)
38590 if (perm1 & 2)
38591 return false;
38592 perm0 -= 2;
38593 perm1 += 2;
38594 std::swap (op0, op1);
38596 /* If the second selector does not come from the second operand, fail. */
38597 else if ((perm1 & 2) == 0)
38598 return false;
38600 /* Success! */
38601 if (target != NULL)
38603 machine_mode vmode, dmode;
38604 rtvec v;
38606 vmode = GET_MODE (target);
38607 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
38608 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
38609 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
38610 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
38611 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
38612 emit_insn (gen_rtx_SET (target, x));
38614 return true;
38617 bool
38618 rs6000_expand_vec_perm_const (rtx operands[4])
38620 rtx target, op0, op1, sel;
38621 unsigned char perm0, perm1;
38623 target = operands[0];
38624 op0 = operands[1];
38625 op1 = operands[2];
38626 sel = operands[3];
38628 /* Unpack the constant selector. */
38629 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
38630 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
38632 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
38635 /* Test whether a constant permutation is supported. */
38637 static bool
38638 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
38639 const unsigned char *sel)
38641 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
38642 if (TARGET_ALTIVEC)
38643 return true;
38645 /* Check for ps_merge* or evmerge* insns. */
38646 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
38647 || (TARGET_SPE && vmode == V2SImode))
38649 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
38650 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
38651 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
38654 return false;
38657 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
38659 static void
38660 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
38661 machine_mode vmode, unsigned nelt, rtx perm[])
38663 machine_mode imode;
38664 rtx x;
38666 imode = vmode;
38667 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
38669 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
38670 imode = mode_for_vector (imode, nelt);
38673 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
38674 x = expand_vec_perm (vmode, op0, op1, x, target);
38675 if (x != target)
38676 emit_move_insn (target, x);
38679 /* Expand an extract even operation. */
38681 void
38682 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
38684 machine_mode vmode = GET_MODE (target);
38685 unsigned i, nelt = GET_MODE_NUNITS (vmode);
38686 rtx perm[16];
38688 for (i = 0; i < nelt; i++)
38689 perm[i] = GEN_INT (i * 2);
38691 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
38694 /* Expand a vector interleave operation. */
38696 void
38697 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
38699 machine_mode vmode = GET_MODE (target);
38700 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
38701 rtx perm[16];
38703 high = (highp ? 0 : nelt / 2);
38704 for (i = 0; i < nelt / 2; i++)
38706 perm[i * 2] = GEN_INT (i + high);
38707 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
38710 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
38713 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
38714 void
38715 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
38717 HOST_WIDE_INT hwi_scale (scale);
38718 REAL_VALUE_TYPE r_pow;
38719 rtvec v = rtvec_alloc (2);
38720 rtx elt;
38721 rtx scale_vec = gen_reg_rtx (V2DFmode);
38722 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
38723 elt = const_double_from_real_value (r_pow, DFmode);
38724 RTVEC_ELT (v, 0) = elt;
38725 RTVEC_ELT (v, 1) = elt;
38726 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
38727 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
38730 /* Return an RTX representing where to find the function value of a
38731 function returning MODE. */
38732 static rtx
38733 rs6000_complex_function_value (machine_mode mode)
38735 unsigned int regno;
38736 rtx r1, r2;
38737 machine_mode inner = GET_MODE_INNER (mode);
38738 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
38740 if (TARGET_FLOAT128_TYPE
38741 && (mode == KCmode
38742 || (mode == TCmode && TARGET_IEEEQUAD)))
38743 regno = ALTIVEC_ARG_RETURN;
38745 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38746 regno = FP_ARG_RETURN;
38748 else
38750 regno = GP_ARG_RETURN;
38752 /* 32-bit is OK since it'll go in r3/r4. */
38753 if (TARGET_32BIT && inner_bytes >= 4)
38754 return gen_rtx_REG (mode, regno);
38757 if (inner_bytes >= 8)
38758 return gen_rtx_REG (mode, regno);
38760 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
38761 const0_rtx);
38762 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
38763 GEN_INT (inner_bytes));
38764 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
38767 /* Return an rtx describing a return value of MODE as a PARALLEL
38768 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
38769 stride REG_STRIDE. */
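/* E.g. MODE = DCmode, N_ELTS = 4, ELT_MODE = SImode, REGNO = 3 and
   REG_STRIDE = 1 describes a 16-byte value split across r3..r6, with
   byte offsets 0, 4, 8 and 12.  */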
38771 static rtx
38772 rs6000_parallel_return (machine_mode mode,
38773 int n_elts, machine_mode elt_mode,
38774 unsigned int regno, unsigned int reg_stride)
38776 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
38778 int i;
38779 for (i = 0; i < n_elts; i++)
38781 rtx r = gen_rtx_REG (elt_mode, regno);
38782 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
38783 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
38784 regno += reg_stride;
38787 return par;
38790 /* Target hook for TARGET_FUNCTION_VALUE.
38792 On the SPE, both FPs and vectors are returned in r3.
38794 On RS/6000 an integer value is in r3 and a floating-point value is in
38795 fp1, unless -msoft-float. */
38797 static rtx
38798 rs6000_function_value (const_tree valtype,
38799 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
38800 bool outgoing ATTRIBUTE_UNUSED)
38802 machine_mode mode;
38803 unsigned int regno;
38804 machine_mode elt_mode;
38805 int n_elts;
38807 /* Special handling for structs in darwin64. */
38808 if (TARGET_MACHO
38809 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
38811 CUMULATIVE_ARGS valcum;
38812 rtx valret;
38814 valcum.words = 0;
38815 valcum.fregno = FP_ARG_MIN_REG;
38816 valcum.vregno = ALTIVEC_ARG_MIN_REG;
38817 /* Do a trial code generation as if this were going to be passed as
38818 an argument; if any part goes in memory, we return NULL. */
38819 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
38820 if (valret)
38821 return valret;
38822 /* Otherwise fall through to standard ABI rules. */
38825 mode = TYPE_MODE (valtype);
38827 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
38828 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
38830 int first_reg, n_regs;
38832 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
38834 /* _Decimal128 must use even/odd register pairs. */
38835 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38836 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
38838 else
38840 first_reg = ALTIVEC_ARG_RETURN;
38841 n_regs = 1;
38844 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
38847 /* Some return value types need to be split when using -mpowerpc64 with the 32-bit ABI. */
38848 if (TARGET_32BIT && TARGET_POWERPC64)
38849 switch (mode)
38851 default:
38852 break;
38853 case DImode:
38854 case SCmode:
38855 case DCmode:
38856 case TCmode:
38857 int count = GET_MODE_SIZE (mode) / 4;
38858 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
38861 if ((INTEGRAL_TYPE_P (valtype)
38862 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
38863 || POINTER_TYPE_P (valtype))
38864 mode = TARGET_32BIT ? SImode : DImode;
38866 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38867 /* _Decimal128 must use an even/odd register pair. */
38868 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38869 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
38870 && !FLOAT128_VECTOR_P (mode)
38871 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
38872 regno = FP_ARG_RETURN;
38873 else if (TREE_CODE (valtype) == COMPLEX_TYPE
38874 && targetm.calls.split_complex_arg)
38875 return rs6000_complex_function_value (mode);
38876 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38877 return register is used in both cases, and we won't see V2DImode/V2DFmode
38878 for pure altivec, combine the two cases. */
38879 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
38880 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
38881 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38882 regno = ALTIVEC_ARG_RETURN;
38883 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38884 && (mode == DFmode || mode == DCmode
38885 || FLOAT128_IBM_P (mode) || mode == TCmode))
38886 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38887 else
38888 regno = GP_ARG_RETURN;
38890 return gen_rtx_REG (mode, regno);
38893 /* Define how to find the value returned by a library function
38894 assuming the value has mode MODE. */
38895 rtx
38896 rs6000_libcall_value (machine_mode mode)
38898 unsigned int regno;
38900 /* Long long return values need to be split when using -mpowerpc64 with the 32-bit ABI. */
38901 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
38902 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
38904 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38905 /* _Decimal128 must use an even/odd register pair. */
38906 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38907 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
38908 && TARGET_HARD_FLOAT && TARGET_FPRS
38909 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
38910 regno = FP_ARG_RETURN;
38911 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38912 return register is used in both cases, and we won't see V2DImode/V2DFmode
38913 for pure altivec, combine the two cases. */
38914 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
38915 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
38916 regno = ALTIVEC_ARG_RETURN;
38917 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
38918 return rs6000_complex_function_value (mode);
38919 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38920 && (mode == DFmode || mode == DCmode
38921 || FLOAT128_IBM_P (mode) || mode == TCmode))
38922 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38923 else
38924 regno = GP_ARG_RETURN;
38926 return gen_rtx_REG (mode, regno);
38930 /* Return true if we use LRA instead of the reload pass. */
38931 static bool
38932 rs6000_lra_p (void)
38934 return TARGET_LRA;
38937 /* Compute register pressure classes. We implement the target hook to avoid
38938 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
38939 lead to incorrect estimates of the number of available registers and therefore
38940 increased register pressure/spill. */
38941 static int
38942 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
38944 int n;
38946 n = 0;
38947 pressure_classes[n++] = GENERAL_REGS;
38948 if (TARGET_VSX)
38949 pressure_classes[n++] = VSX_REGS;
38950 else
38952 if (TARGET_ALTIVEC)
38953 pressure_classes[n++] = ALTIVEC_REGS;
38954 if (TARGET_HARD_FLOAT && TARGET_FPRS)
38955 pressure_classes[n++] = FLOAT_REGS;
38957 pressure_classes[n++] = CR_REGS;
38958 pressure_classes[n++] = SPECIAL_REGS;
38960 return n;
38963 /* Given FROM and TO register numbers, say whether this elimination is allowed.
38964 Frame pointer elimination is automatically handled.
38966 For the RS/6000, if frame pointer elimination is being done, we would like
38967 to convert ap into fp, not sp.
38969 We need r30 if -mminimal-toc was specified, and there are constant pool
38970 references. */
38972 static bool
38973 rs6000_can_eliminate (const int from, const int to)
38975 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
38976 ? ! frame_pointer_needed
38977 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
38978 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
38979 || constant_pool_empty_p ()
38980 : true);
38983 /* Define the offset between two registers, FROM to be eliminated and its
38984 replacement TO, at the start of a routine. */
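/* E.g. eliminating ARG_POINTER_REGNUM to STACK_POINTER_REGNUM in a
   function that pushes a frame yields info->total_size, since the
   outgoing stack pointer sits that many bytes below the incoming
   one.  */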
38985 HOST_WIDE_INT
38986 rs6000_initial_elimination_offset (int from, int to)
38988 rs6000_stack_t *info = rs6000_stack_info ();
38989 HOST_WIDE_INT offset;
38991 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38992 offset = info->push_p ? 0 : -info->total_size;
38993 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38995 offset = info->push_p ? 0 : -info->total_size;
38996 if (FRAME_GROWS_DOWNWARD)
38997 offset += info->fixed_size + info->vars_size + info->parm_size;
38999 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
39000 offset = FRAME_GROWS_DOWNWARD
39001 ? info->fixed_size + info->vars_size + info->parm_size
39002 : 0;
39003 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
39004 offset = info->total_size;
39005 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
39006 offset = info->push_p ? info->total_size : 0;
39007 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
39008 offset = 0;
39009 else
39010 gcc_unreachable ();
39012 return offset;
39015 static rtx
39016 rs6000_dwarf_register_span (rtx reg)
39018 rtx parts[8];
39019 int i, words;
39020 unsigned regno = REGNO (reg);
39021 machine_mode mode = GET_MODE (reg);
39023 if (TARGET_SPE
39024 && regno < 32
39025 && (SPE_VECTOR_MODE (GET_MODE (reg))
39026 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
39027 && mode != SFmode && mode != SDmode && mode != SCmode)))
39028 ;
39029 else
39030 return NULL_RTX;
39032 regno = REGNO (reg);
39034 /* The duality of the SPE register size wreaks all kinds of havoc.
39035 This is a way of distinguishing the 32-bit r0 from the
39036 64-bit r0. */
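/* E.g. a 64-bit SPE value in r5 is described as two SImode pieces:
   r5 itself and its corresponding SPE high register, ordered
   according to endianness below.  */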
39037 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
39038 gcc_assert (words <= 4);
39039 for (i = 0; i < words; i++, regno++)
39041 if (BYTES_BIG_ENDIAN)
39043 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
39044 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
39046 else
39048 parts[2 * i] = gen_rtx_REG (SImode, regno);
39049 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
39053 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
39056 /* Fill in sizes for SPE register high parts in table used by unwinder. */
39058 static void
39059 rs6000_init_dwarf_reg_sizes_extra (tree address)
39061 if (TARGET_SPE)
39063 int i;
39064 machine_mode mode = TYPE_MODE (char_type_node);
39065 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
39066 rtx mem = gen_rtx_MEM (BLKmode, addr);
39067 rtx value = gen_int_mode (4, mode);
39069 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
39071 int column = DWARF_REG_TO_UNWIND_COLUMN
39072 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
39073 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
39075 emit_move_insn (adjust_address (mem, mode, offset), value);
39079 if (TARGET_MACHO && ! TARGET_ALTIVEC)
39081 int i;
39082 machine_mode mode = TYPE_MODE (char_type_node);
39083 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
39084 rtx mem = gen_rtx_MEM (BLKmode, addr);
39085 rtx value = gen_int_mode (16, mode);
39087 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
39088 The unwinder still needs to know the size of Altivec registers. */
39090 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
39092 int column = DWARF_REG_TO_UNWIND_COLUMN
39093 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
39094 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
39096 emit_move_insn (adjust_address (mem, mode, offset), value);
39101 /* Map internal gcc register numbers to debug format register numbers.
39102 FORMAT specifies the type of debug register number to use:
39103 0 -- debug information, except for frame-related sections
39104 1 -- DWARF .debug_frame section
39105 2 -- DWARF .eh_frame section */
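/* E.g. under RS6000_USE_DWARF_NUMBERING the link register is emitted
   as DWARF register 108 in .debug_info, while .eh_frame (FORMAT == 2)
   always uses the internal GCC number.  */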
39107 unsigned int
39108 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
39110 /* We never use the GCC internal number for SPE high registers.
39111 Those are mapped to the 1200..1231 range for all debug formats. */
39112 if (SPE_HIGH_REGNO_P (regno))
39113 return regno - FIRST_SPE_HIGH_REGNO + 1200;
39115 /* Except for the above, we use the internal number for non-DWARF
39116 debug information, and also for .eh_frame. */
39117 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
39118 return regno;
39120 /* On some platforms, we use the standard DWARF register
39121 numbering for .debug_info and .debug_frame. */
39122 #ifdef RS6000_USE_DWARF_NUMBERING
39123 if (regno <= 63)
39124 return regno;
39125 if (regno == LR_REGNO)
39126 return 108;
39127 if (regno == CTR_REGNO)
39128 return 109;
39129 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
39130 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
39131 The actual code emitted saves the whole of CR, so we map CR2_REGNO
39132 to the DWARF reg for CR. */
39133 if (format == 1 && regno == CR2_REGNO)
39134 return 64;
39135 if (CR_REGNO_P (regno))
39136 return regno - CR0_REGNO + 86;
39137 if (regno == CA_REGNO)
39138 return 101; /* XER */
39139 if (ALTIVEC_REGNO_P (regno))
39140 return regno - FIRST_ALTIVEC_REGNO + 1124;
39141 if (regno == VRSAVE_REGNO)
39142 return 356;
39143 if (regno == VSCR_REGNO)
39144 return 67;
39145 if (regno == SPE_ACC_REGNO)
39146 return 99;
39147 if (regno == SPEFSCR_REGNO)
39148 return 612;
39149 #endif
39150 return regno;
39153 /* target hook eh_return_filter_mode */
39154 static machine_mode
39155 rs6000_eh_return_filter_mode (void)
39157 return TARGET_32BIT ? SImode : word_mode;
39160 /* Target hook for scalar_mode_supported_p. */
39161 static bool
39162 rs6000_scalar_mode_supported_p (machine_mode mode)
39164 /* -m32 does not support TImode. This is the default, from
39165 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
39166 same ABI as for -m32. But default_scalar_mode_supported_p allows
39167 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
39168 for -mpowerpc64. */
39169 if (TARGET_32BIT && mode == TImode)
39170 return false;
39172 if (DECIMAL_FLOAT_MODE_P (mode))
39173 return default_decimal_float_supported_p ();
39174 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
39175 return true;
39176 else
39177 return default_scalar_mode_supported_p (mode);
39180 /* Target hook for vector_mode_supported_p. */
39181 static bool
39182 rs6000_vector_mode_supported_p (machine_mode mode)
39185 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
39186 return true;
39188 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
39189 return true;
39191 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
39192 128-bit, the compiler might try to widen IEEE 128-bit to IBM
39193 double-double. */
39194 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
39195 return true;
39197 else
39198 return false;
39201 /* Target hook for floatn_mode. */
39202 static machine_mode
39203 rs6000_floatn_mode (int n, bool extended)
39205 if (extended)
39207 switch (n)
39209 case 32:
39210 return DFmode;
39212 case 64:
39213 if (TARGET_FLOAT128_KEYWORD)
39214 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39215 else
39216 return VOIDmode;
39218 case 128:
39219 return VOIDmode;
39221 default:
39222 /* Those are the only valid _FloatNx types. */
39223 gcc_unreachable ();
39226 else
39228 switch (n)
39230 case 32:
39231 return SFmode;
39233 case 64:
39234 return DFmode;
39236 case 128:
39237 if (TARGET_FLOAT128_KEYWORD)
39238 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39239 else
39240 return VOIDmode;
39242 default:
39243 return VOIDmode;
39249 /* Target hook for c_mode_for_suffix. */
39250 static machine_mode
39251 rs6000_c_mode_for_suffix (char suffix)
39253 if (TARGET_FLOAT128_TYPE)
39255 if (suffix == 'q' || suffix == 'Q')
39256 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
39258 /* At the moment, we are not defining a suffix for IBM extended double.
39259 If/when the default for -mabi=ieeelongdouble is changed, and we want
39260 to support __ibm128 constants in legacy library code, we may need to
39261 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
39262 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
39263 __float80 constants. */
39266 return VOIDmode;
39269 /* Target hook for invalid_arg_for_unprototyped_fn. */
39270 static const char *
39271 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
39273 return (!rs6000_darwin64_abi
39274 && typelist == 0
39275 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
39276 && (funcdecl == NULL_TREE
39277 || (TREE_CODE (funcdecl) == FUNCTION_DECL
39278 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
39279 ? N_("AltiVec argument passed to unprototyped function")
39280 : NULL;
39283 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
39284 setup by using the hidden function __stack_chk_fail_local instead of
39285 calling __stack_chk_fail directly. Otherwise it is better to call
39286 __stack_chk_fail directly. */
39288 static tree ATTRIBUTE_UNUSED
39289 rs6000_stack_protect_fail (void)
39291 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
39292 ? default_hidden_stack_protect_fail ()
39293 : default_external_stack_protect_fail ();
39296 void
39297 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
39298 int num_operands ATTRIBUTE_UNUSED)
39300 if (rs6000_warn_cell_microcode)
39302 const char *temp;
39303 int insn_code_number = recog_memoized (insn);
39304 location_t location = INSN_LOCATION (insn);
39306 /* Punt on insns we cannot recognize. */
39307 if (insn_code_number < 0)
39308 return;
39310 /* get_insn_template can modify recog_data, so save and restore it. */
39311 struct recog_data_d recog_data_save = recog_data;
39312 for (int i = 0; i < recog_data.n_operands; i++)
39313 recog_data.operand[i] = copy_rtx (recog_data.operand[i]);
39314 temp = get_insn_template (insn_code_number, insn);
39315 recog_data = recog_data_save;
39317 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
39318 warning_at (location, OPT_mwarn_cell_microcode,
39319 "emitting microcode insn %s\t[%s] #%d",
39320 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
39321 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
39322 warning_at (location, OPT_mwarn_cell_microcode,
39323 "emitting conditional microcode insn %s\t[%s] #%d",
39324 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
39328 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
39330 #if TARGET_ELF
39331 static unsigned HOST_WIDE_INT
39332 rs6000_asan_shadow_offset (void)
39334 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
39336 #endif
39338 /* Mask options that we want to support inside of attribute((target)) and
39339 #pragma GCC target operations. Note, we do not include things like
39340 64/32-bit, endianness, hard/soft floating point, etc. that would have
39341 different calling sequences. */
39343 struct rs6000_opt_mask {
39344 const char *name; /* option name */
39345 HOST_WIDE_INT mask; /* mask to set */
39346 bool invert; /* invert sense of mask */
39347 bool valid_target; /* option is a target option */
39350 static struct rs6000_opt_mask const rs6000_opt_masks[] =
39352 { "altivec", OPTION_MASK_ALTIVEC, false, true },
39353 { "cmpb", OPTION_MASK_CMPB, false, true },
39354 { "crypto", OPTION_MASK_CRYPTO, false, true },
39355 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
39356 { "dlmzb", OPTION_MASK_DLMZB, false, true },
39357 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
39358 false, true },
39359 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
39360 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
39361 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
39362 { "fprnd", OPTION_MASK_FPRND, false, true },
39363 { "hard-dfp", OPTION_MASK_DFP, false, true },
39364 { "htm", OPTION_MASK_HTM, false, true },
39365 { "isel", OPTION_MASK_ISEL, false, true },
39366 { "mfcrf", OPTION_MASK_MFCRF, false, true },
39367 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
39368 { "modulo", OPTION_MASK_MODULO, false, true },
39369 { "mulhw", OPTION_MASK_MULHW, false, true },
39370 { "multiple", OPTION_MASK_MULTIPLE, false, true },
39371 { "popcntb", OPTION_MASK_POPCNTB, false, true },
39372 { "popcntd", OPTION_MASK_POPCNTD, false, true },
39373 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
39374 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
39375 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
39376 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
39377 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
39378 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
39379 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
39380 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
39381 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
39382 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
39383 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
39384 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
39385 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
39386 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
39387 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
39388 { "string", OPTION_MASK_STRING, false, true },
39389 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
39390 { "update", OPTION_MASK_NO_UPDATE, true , true },
39391 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
39392 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
39393 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
39394 { "vsx", OPTION_MASK_VSX, false, true },
39395 { "vsx-small-integer", OPTION_MASK_VSX_SMALL_INTEGER, false, true },
39396 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
39397 #ifdef OPTION_MASK_64BIT
39398 #if TARGET_AIX_OS
39399 { "aix64", OPTION_MASK_64BIT, false, false },
39400 { "aix32", OPTION_MASK_64BIT, true, false },
39401 #else
39402 { "64", OPTION_MASK_64BIT, false, false },
39403 { "32", OPTION_MASK_64BIT, true, false },
39404 #endif
39405 #endif
39406 #ifdef OPTION_MASK_EABI
39407 { "eabi", OPTION_MASK_EABI, false, false },
39408 #endif
39409 #ifdef OPTION_MASK_LITTLE_ENDIAN
39410 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
39411 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
39412 #endif
39413 #ifdef OPTION_MASK_RELOCATABLE
39414 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
39415 #endif
39416 #ifdef OPTION_MASK_STRICT_ALIGN
39417 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
39418 #endif
39419 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
39420 { "string", OPTION_MASK_STRING, false, false },
39423 /* Builtin mask mapping for printing the flags. */
39424 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
39426 { "altivec", RS6000_BTM_ALTIVEC, false, false },
39427 { "vsx", RS6000_BTM_VSX, false, false },
39428 { "spe", RS6000_BTM_SPE, false, false },
39429 { "paired", RS6000_BTM_PAIRED, false, false },
39430 { "fre", RS6000_BTM_FRE, false, false },
39431 { "fres", RS6000_BTM_FRES, false, false },
39432 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
39433 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
39434 { "popcntd", RS6000_BTM_POPCNTD, false, false },
39435 { "cell", RS6000_BTM_CELL, false, false },
39436 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
39437 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
39438 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
39439 { "crypto", RS6000_BTM_CRYPTO, false, false },
39440 { "htm", RS6000_BTM_HTM, false, false },
39441 { "hard-dfp", RS6000_BTM_DFP, false, false },
39442 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
39443 { "long-double-128", RS6000_BTM_LDBL128, false, false },
39444 { "float128", RS6000_BTM_FLOAT128, false, false },
39447 /* Option variables that we want to support inside attribute((target)) and
39448 #pragma GCC target operations. */
39450 struct rs6000_opt_var {
39451 const char *name; /* option name */
39452 size_t global_offset; /* offset of the option in global_options. */
39453 size_t target_offset; /* offset of the option in target options. */
39456 static struct rs6000_opt_var const rs6000_opt_vars[] =
39458 { "friz",
39459 offsetof (struct gcc_options, x_TARGET_FRIZ),
39460 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
39461 { "avoid-indexed-addresses",
39462 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
39463 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
39464 { "paired",
39465 offsetof (struct gcc_options, x_rs6000_paired_float),
39466 offsetof (struct cl_target_option, x_rs6000_paired_float), },
39467 { "longcall",
39468 offsetof (struct gcc_options, x_rs6000_default_long_calls),
39469 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
39470 { "optimize-swaps",
39471 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
39472 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
39473 { "allow-movmisalign",
39474 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
39475 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
39476 { "allow-df-permute",
39477 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
39478 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
39479 { "sched-groups",
39480 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
39481 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
39482 { "always-hint",
39483 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
39484 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
39485 { "align-branch-targets",
39486 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
39487 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
39488 { "vectorize-builtins",
39489 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
39490 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
39491 { "tls-markers",
39492 offsetof (struct gcc_options, x_tls_markers),
39493 offsetof (struct cl_target_option, x_tls_markers), },
39494 { "sched-prolog",
39495 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
39496 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
39497 { "sched-epilog",
39498 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
39499 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
39500 { "gen-cell-microcode",
39501 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
39502 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
39503 { "warn-cell-microcode",
39504 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
39505 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
39508 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
39509 parsing. Return true if there were no errors. */
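/* For example, attribute((target("cpu=power8,no-vsx"))) arrives here
   as the string "cpu=power8,no-vsx"; each comma-separated token is
   processed in turn below.  */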
39511 static bool
39512 rs6000_inner_target_options (tree args, bool attr_p)
39514 bool ret = true;
39516 if (args == NULL_TREE)
39517 ;
39519 else if (TREE_CODE (args) == STRING_CST)
39521 char *p = ASTRDUP (TREE_STRING_POINTER (args));
39522 char *q;
39524 while ((q = strtok (p, ",")) != NULL)
39526 bool error_p = false;
39527 bool not_valid_p = false;
39528 const char *cpu_opt = NULL;
39530 p = NULL;
39531 if (strncmp (q, "cpu=", 4) == 0)
39533 int cpu_index = rs6000_cpu_name_lookup (q+4);
39534 if (cpu_index >= 0)
39535 rs6000_cpu_index = cpu_index;
39536 else
39538 error_p = true;
39539 cpu_opt = q+4;
39542 else if (strncmp (q, "tune=", 5) == 0)
39544 int tune_index = rs6000_cpu_name_lookup (q+5);
39545 if (tune_index >= 0)
39546 rs6000_tune_index = tune_index;
39547 else
39549 error_p = true;
39550 cpu_opt = q+5;
39553 else
39555 size_t i;
39556 bool invert = false;
39557 char *r = q;
39559 error_p = true;
39560 if (strncmp (r, "no-", 3) == 0)
39562 invert = true;
39563 r += 3;
39566 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
39567 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
39569 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
39571 if (!rs6000_opt_masks[i].valid_target)
39572 not_valid_p = true;
39573 else
39575 error_p = false;
39576 rs6000_isa_flags_explicit |= mask;
39578 /* VSX needs altivec, so -mvsx automagically sets
39579 altivec and disables -mavoid-indexed-addresses. */
39580 if (!invert)
39582 if (mask == OPTION_MASK_VSX)
39584 mask |= OPTION_MASK_ALTIVEC;
39585 TARGET_AVOID_XFORM = 0;
39589 if (rs6000_opt_masks[i].invert)
39590 invert = !invert;
39592 if (invert)
39593 rs6000_isa_flags &= ~mask;
39594 else
39595 rs6000_isa_flags |= mask;
39597 break;
39600 if (error_p && !not_valid_p)
39602 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
39603 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
39605 size_t j = rs6000_opt_vars[i].global_offset;
39606 *((int *) ((char *)&global_options + j)) = !invert;
39607 error_p = false;
39608 not_valid_p = false;
39609 break;
39614 if (error_p)
39616 const char *eprefix, *esuffix;
39618 ret = false;
39619 if (attr_p)
39621 eprefix = "__attribute__((__target__(";
39622 esuffix = ")))";
39624 else
39626 eprefix = "#pragma GCC target ";
39627 esuffix = "";
39630 if (cpu_opt)
39631 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
39632 q, esuffix);
39633 else if (not_valid_p)
39634 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
39635 else
39636 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
39641 else if (TREE_CODE (args) == TREE_LIST)
39645 tree value = TREE_VALUE (args);
39646 if (value)
39648 bool ret2 = rs6000_inner_target_options (value, attr_p);
39649 if (!ret2)
39650 ret = false;
39652 args = TREE_CHAIN (args);
39654 while (args != NULL_TREE);
39657 else
39659 error ("attribute %<target%> argument not a string");
39660 return false;
39663 return ret;
39666 /* Print out the target options as a list for -mdebug=target. */
39668 static void
39669 rs6000_debug_target_options (tree args, const char *prefix)
39671 if (args == NULL_TREE)
39672 fprintf (stderr, "%s<NULL>", prefix);
39674 else if (TREE_CODE (args) == STRING_CST)
39676 char *p = ASTRDUP (TREE_STRING_POINTER (args));
39677 char *q;
39679 while ((q = strtok (p, ",")) != NULL)
39681 p = NULL;
39682 fprintf (stderr, "%s\"%s\"", prefix, q);
39683 prefix = ", ";
39687 else if (TREE_CODE (args) == TREE_LIST)
39691 tree value = TREE_VALUE (args);
39692 if (value)
39694 rs6000_debug_target_options (value, prefix);
39695 prefix = ", ";
39697 args = TREE_CHAIN (args);
39699 while (args != NULL_TREE);
39702 else
39703 gcc_unreachable ();
39705 return;
39709 /* Hook to validate attribute((target("..."))). */
39711 static bool
39712 rs6000_valid_attribute_p (tree fndecl,
39713 tree ARG_UNUSED (name),
39714 tree args,
39715 int flags)
39717 struct cl_target_option cur_target;
39718 bool ret;
39719 tree old_optimize = build_optimization_node (&global_options);
39720 tree new_target, new_optimize;
39721 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
39723 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
39725 if (TARGET_DEBUG_TARGET)
39727 tree tname = DECL_NAME (fndecl);
39728 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
39729 if (tname)
39730 fprintf (stderr, "function: %.*s\n",
39731 (int) IDENTIFIER_LENGTH (tname),
39732 IDENTIFIER_POINTER (tname));
39733 else
39734 fprintf (stderr, "function: unknown\n");
39736 fprintf (stderr, "args:");
39737 rs6000_debug_target_options (args, " ");
39738 fprintf (stderr, "\n");
39740 if (flags)
39741 fprintf (stderr, "flags: 0x%x\n", flags);
39743 fprintf (stderr, "--------------------\n");
39746 old_optimize = build_optimization_node (&global_options);
39747 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
39749 /* If the function changed the optimization levels as well as setting target
39750 options, start with the optimizations specified. */
39751 if (func_optimize && func_optimize != old_optimize)
39752 cl_optimization_restore (&global_options,
39753 TREE_OPTIMIZATION (func_optimize));
39755 /* The target attributes may also change some optimization flags, so update
39756 the optimization options if necessary. */
39757 cl_target_option_save (&cur_target, &global_options);
39758 rs6000_cpu_index = rs6000_tune_index = -1;
39759 ret = rs6000_inner_target_options (args, true);
39761 /* Set up any additional state. */
39762 if (ret)
39764 ret = rs6000_option_override_internal (false);
39765 new_target = build_target_option_node (&global_options);
39767 else
39768 new_target = NULL;
39770 new_optimize = build_optimization_node (&global_options);
39772 if (!new_target)
39773 ret = false;
39775 else if (fndecl)
39777 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
39779 if (old_optimize != new_optimize)
39780 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
39783 cl_target_option_restore (&global_options, &cur_target);
39785 if (old_optimize != new_optimize)
39786 cl_optimization_restore (&global_options,
39787 TREE_OPTIMIZATION (old_optimize));
39789 return ret;
39793 /* Hook to validate the current #pragma GCC target and set the state, and
39794 update the macros based on what was changed. If ARGS is NULL, then
39795 POP_TARGET is used to reset the options. */
39797 bool
39798 rs6000_pragma_target_parse (tree args, tree pop_target)
39800 tree prev_tree = build_target_option_node (&global_options);
39801 tree cur_tree;
39802 struct cl_target_option *prev_opt, *cur_opt;
39803 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
39804 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
39806 if (TARGET_DEBUG_TARGET)
39808 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
39809 fprintf (stderr, "args:");
39810 rs6000_debug_target_options (args, " ");
39811 fprintf (stderr, "\n");
39813 if (pop_target)
39815 fprintf (stderr, "pop_target:\n");
39816 debug_tree (pop_target);
39818 else
39819 fprintf (stderr, "pop_target: <NULL>\n");
39821 fprintf (stderr, "--------------------\n");
39824 if (! args)
39826 cur_tree = ((pop_target)
39827 ? pop_target
39828 : target_option_default_node);
39829 cl_target_option_restore (&global_options,
39830 TREE_TARGET_OPTION (cur_tree));
39832 else
39834 rs6000_cpu_index = rs6000_tune_index = -1;
39835 if (!rs6000_inner_target_options (args, false)
39836 || !rs6000_option_override_internal (false)
39837 || (cur_tree = build_target_option_node (&global_options))
39838 == NULL_TREE)
39840 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
39841 fprintf (stderr, "invalid pragma\n");
39843 return false;
39847 target_option_current_node = cur_tree;
39849 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
39850 change the macros that are defined. */
39851 if (rs6000_target_modify_macros_ptr)
39853 prev_opt = TREE_TARGET_OPTION (prev_tree);
39854 prev_bumask = prev_opt->x_rs6000_builtin_mask;
39855 prev_flags = prev_opt->x_rs6000_isa_flags;
39857 cur_opt = TREE_TARGET_OPTION (cur_tree);
39858 cur_flags = cur_opt->x_rs6000_isa_flags;
39859 cur_bumask = cur_opt->x_rs6000_builtin_mask;
39861 diff_bumask = (prev_bumask ^ cur_bumask);
39862 diff_flags = (prev_flags ^ cur_flags);
39864 if ((diff_flags != 0) || (diff_bumask != 0))
39866 /* Delete old macros. */
39867 rs6000_target_modify_macros_ptr (false,
39868 prev_flags & diff_flags,
39869 prev_bumask & diff_bumask);
39871 /* Define new macros. */
39872 rs6000_target_modify_macros_ptr (true,
39873 cur_flags & diff_flags,
39874 cur_bumask & diff_bumask);
39878 return true;
39882 /* Remember the last target of rs6000_set_current_function. */
39883 static GTY(()) tree rs6000_previous_fndecl;
39885 /* Establish appropriate back-end context for processing the function
39886 FNDECL. The argument might be NULL to indicate processing at top
39887 level, outside of any function scope. */
39888 static void
39889 rs6000_set_current_function (tree fndecl)
39891 tree old_tree = (rs6000_previous_fndecl
39892 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
39893 : NULL_TREE);
39895 tree new_tree = (fndecl
39896 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
39897 : NULL_TREE);
39899 if (TARGET_DEBUG_TARGET)
39901 bool print_final = false;
39902 fprintf (stderr, "\n==================== rs6000_set_current_function");
39904 if (fndecl)
39905 fprintf (stderr, ", fndecl %s (%p)",
39906 (DECL_NAME (fndecl)
39907 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
39908 : "<unknown>"), (void *)fndecl);
39910 if (rs6000_previous_fndecl)
39911 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
39913 fprintf (stderr, "\n");
39914 if (new_tree)
39916 fprintf (stderr, "\nnew fndecl target specific options:\n");
39917 debug_tree (new_tree);
39918 print_final = true;
39921 if (old_tree)
39923 fprintf (stderr, "\nold fndecl target specific options:\n");
39924 debug_tree (old_tree);
39925 print_final = true;
39928 if (print_final)
39929 fprintf (stderr, "--------------------\n");
39932 /* Only change the context if the function changes. This hook is called
39933 several times in the course of compiling a function, and we don't want to
39934 slow things down too much or call target_reinit when it isn't safe. */
39935 if (fndecl && fndecl != rs6000_previous_fndecl)
39937 rs6000_previous_fndecl = fndecl;
39938 if (old_tree == new_tree)
39939 ;
39941 else if (new_tree && new_tree != target_option_default_node)
39943 cl_target_option_restore (&global_options,
39944 TREE_TARGET_OPTION (new_tree));
39945 if (TREE_TARGET_GLOBALS (new_tree))
39946 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39947 else
39948 TREE_TARGET_GLOBALS (new_tree)
39949 = save_target_globals_default_opts ();
39952 else if (old_tree && old_tree != target_option_default_node)
39954 new_tree = target_option_current_node;
39955 cl_target_option_restore (&global_options,
39956 TREE_TARGET_OPTION (new_tree));
39957 if (TREE_TARGET_GLOBALS (new_tree))
39958 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39959 else if (new_tree == target_option_default_node)
39960 restore_target_globals (&default_target_globals);
39961 else
39962 TREE_TARGET_GLOBALS (new_tree)
39963 = save_target_globals_default_opts ();
39969 /* Save the current options */
39971 static void
39972 rs6000_function_specific_save (struct cl_target_option *ptr,
39973 struct gcc_options *opts)
39975 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
39976 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
39979 /* Restore the current options */
39981 static void
39982 rs6000_function_specific_restore (struct gcc_options *opts,
39983 struct cl_target_option *ptr)
39986 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
39987 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
39988 (void) rs6000_option_override_internal (false);
39991 /* Print the current options */
39993 static void
39994 rs6000_function_specific_print (FILE *file, int indent,
39995 struct cl_target_option *ptr)
39997 rs6000_print_isa_options (file, indent, "Isa options set",
39998 ptr->x_rs6000_isa_flags);
40000 rs6000_print_isa_options (file, indent, "Isa options explicit",
40001 ptr->x_rs6000_isa_flags_explicit);
40004 /* Helper function to print the current isa or misc options on a line. */
40006 static void
40007 rs6000_print_options_internal (FILE *file,
40008 int indent,
40009 const char *string,
40010 HOST_WIDE_INT flags,
40011 const char *prefix,
40012 const struct rs6000_opt_mask *opts,
40013 size_t num_elements)
40015 size_t i;
40016 size_t start_column = 0;
40017 size_t cur_column;
40018 size_t max_column = 120;
40019 size_t prefix_len = strlen (prefix);
40020 size_t comma_len = 0;
40021 const char *comma = "";
40023 if (indent)
40024 start_column += fprintf (file, "%*s", indent, "");
40026 if (!flags)
40028 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
40029 return;
40032 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
40034 /* Print the various mask options. */
40035 cur_column = start_column;
40036 for (i = 0; i < num_elements; i++)
40038 bool invert = opts[i].invert;
40039 const char *name = opts[i].name;
40040 const char *no_str = "";
40041 HOST_WIDE_INT mask = opts[i].mask;
40042 size_t len = comma_len + prefix_len + strlen (name);
40044 if (!invert)
40046 if ((flags & mask) == 0)
40048 no_str = "no-";
40049 len += sizeof ("no-") - 1;
40052 flags &= ~mask;
40055 else
40057 if ((flags & mask) != 0)
40059 no_str = "no-";
40060 len += sizeof ("no-") - 1;
40063 flags |= mask;
40066 cur_column += len;
40067 if (cur_column > max_column)
40069 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
40070 cur_column = start_column + len;
40071 comma = "";
40074 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
40075 comma = ", ";
40076 comma_len = sizeof (", ") - 1;
40079 fputs ("\n", file);
40082 /* Helper function to print the current isa options on a line. */
40084 static void
40085 rs6000_print_isa_options (FILE *file, int indent, const char *string,
40086 HOST_WIDE_INT flags)
40088 rs6000_print_options_internal (file, indent, string, flags, "-m",
40089 &rs6000_opt_masks[0],
40090 ARRAY_SIZE (rs6000_opt_masks));
40093 static void
40094 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
40095 HOST_WIDE_INT flags)
40097 rs6000_print_options_internal (file, indent, string, flags, "",
40098 &rs6000_builtin_mask_names[0],
40099 ARRAY_SIZE (rs6000_builtin_mask_names));
40102 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
40103 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
40104 -mvsx-timode, -mupper-regs-df).
40106 If the user used -mno-power8-vector, we need to turn off all of the implicit
40107 ISA 2.07 and 3.0 options that relate to the vector unit.
40109 If the user used -mno-power9-vector, we need to turn off all of the implicit
40110 ISA 3.0 options that relate to the vector unit.
40112 This function does not handle explicit options such as the user specifying
40113 -mdirect-move. These are handled in rs6000_option_override_internal, and
40114 the appropriate error is given if needed.
40116 We return a mask of all of the implicit options that should not be enabled
40117 by default. */
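/* E.g. with an explicit -mno-power9-vector, every implicit flag in
   OTHER_P9_VECTOR_MASKS is cleared, and any of those flags that were
   also set explicitly (such as -mpower9-dform-vector) are diagnosed
   with the "turns off" error below.  */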
40119 static HOST_WIDE_INT
40120 rs6000_disable_incompatible_switches (void)
40122 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
40123 size_t i, j;
40125 static const struct {
40126 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
40127 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
40128 const char *const name; /* name of the switch. */
40129 } flags[] = {
40130 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
40131 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
40132 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
40135 for (i = 0; i < ARRAY_SIZE (flags); i++)
40137 HOST_WIDE_INT no_flag = flags[i].no_flag;
40139 if ((rs6000_isa_flags & no_flag) == 0
40140 && (rs6000_isa_flags_explicit & no_flag) != 0)
40142 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
40143 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
40144 & rs6000_isa_flags
40145 & dep_flags);
40147 if (set_flags)
40149 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
40150 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
40152 set_flags &= ~rs6000_opt_masks[j].mask;
40153 error ("-mno-%s turns off -m%s",
40154 flags[i].name,
40155 rs6000_opt_masks[j].name);
40158 gcc_assert (!set_flags);
40161 rs6000_isa_flags &= ~dep_flags;
40162 ignore_masks |= no_flag | dep_flags;
40166 if (!TARGET_P9_VECTOR
40167 && (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) != 0
40168 && TARGET_P9_DFORM_BOTH > 0)
40170 error ("-mno-power9-vector turns off -mpower9-dform");
40171 TARGET_P9_DFORM_BOTH = 0;
40174 return ignore_masks;
40178 /* Hook to determine if one function can safely inline another. */
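/* E.g. a caller compiled with -mvsx can inline a callee marked
   target("altivec"), since the callee's ISA flags are a subset of the
   caller's, but a plain-altivec caller cannot inline a vsx callee.  */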
40180 static bool
40181 rs6000_can_inline_p (tree caller, tree callee)
40183 bool ret = false;
40184 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
40185 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
40187 /* If callee has no option attributes, then it is ok to inline. */
40188 if (!callee_tree)
40189 ret = true;
40191 /* If caller has no option attributes, but callee does then it is not ok to
40192 inline. */
40193 else if (!caller_tree)
40194 ret = false;
40196 else
40198 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
40199 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
40201 /* Callee's options should be a subset of the caller's, i.e. a vsx function
40202 can inline an altivec function but a non-vsx function can't inline a
40203 vsx function. */
40204 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
40205 == callee_opts->x_rs6000_isa_flags)
40206 ret = true;
40209 if (TARGET_DEBUG_TARGET)
40210 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
40211 (DECL_NAME (caller)
40212 ? IDENTIFIER_POINTER (DECL_NAME (caller))
40213 : "<unknown>"),
40214 (DECL_NAME (callee)
40215 ? IDENTIFIER_POINTER (DECL_NAME (callee))
40216 : "<unknown>"),
40217 (ret ? "can" : "cannot"));
40219 return ret;
40222 /* Allocate a stack temp and fix up the address so it meets the particular
40223 memory requirements (either offsettable or REG+REG addressing). */
40225 rtx
40226 rs6000_allocate_stack_temp (machine_mode mode,
40227 bool offsettable_p,
40228 bool reg_reg_p)
40230 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
40231 rtx addr = XEXP (stack, 0);
40232 int strict_p = (reload_in_progress || reload_completed);
40234 if (!legitimate_indirect_address_p (addr, strict_p))
40236 if (offsettable_p
40237 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
40238 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
40240 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
40241 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
40244 return stack;
40247 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
40248 to such a form to deal with memory reference instructions like STFIWX that
40249 only take reg+reg addressing. */
40251 rtx
40252 rs6000_address_for_fpconvert (rtx x)
40254 int strict_p = (reload_in_progress || reload_completed);
40255 rtx addr;
40257 gcc_assert (MEM_P (x));
40258 addr = XEXP (x, 0);
40259 if (! legitimate_indirect_address_p (addr, strict_p)
40260 && ! legitimate_indexed_address_p (addr, strict_p))
40262 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
40264 rtx reg = XEXP (addr, 0);
40265 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
40266 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
40267 gcc_assert (REG_P (reg));
40268 emit_insn (gen_add3_insn (reg, reg, size_rtx));
40269 addr = reg;
40271 else if (GET_CODE (addr) == PRE_MODIFY)
40273 rtx reg = XEXP (addr, 0);
40274 rtx expr = XEXP (addr, 1);
40275 gcc_assert (REG_P (reg));
40276 gcc_assert (GET_CODE (expr) == PLUS);
40277 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
40278 addr = reg;
40281 x = replace_equiv_address (x, copy_addr_to_reg (addr));
40284 return x;
40287 /* Given a memory reference, if it is not in the form for altivec memory
40288 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
40289 convert to the altivec format. */
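/* The AND with -16 below clears the low four address bits, matching
   the 16-byte alignment that the AltiVec lvx/stvx instructions
   enforce by ignoring those bits.  */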
40291 rtx
40292 rs6000_address_for_altivec (rtx x)
40294 gcc_assert (MEM_P (x));
40295 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
40297 rtx addr = XEXP (x, 0);
40298 int strict_p = (reload_in_progress || reload_completed);
40300 if (!legitimate_indexed_address_p (addr, strict_p)
40301 && !legitimate_indirect_address_p (addr, strict_p))
40302 addr = copy_to_mode_reg (Pmode, addr);
40304 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
40305 x = change_address (x, GET_MODE (x), addr);
40308 return x;
40311 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
40313 On the RS/6000, all integer constants are acceptable, most won't be valid
40314 for particular insns, though. Only easy FP constants are acceptable. */
40316 static bool
40317 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
40319 if (TARGET_ELF && tls_referenced_p (x))
40320 return false;
40322 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
40323 || GET_MODE (x) == VOIDmode
40324 || (TARGET_POWERPC64 && mode == DImode)
40325 || easy_fp_constant (x, mode)
40326 || easy_vector_constant (x, mode));
40330 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
40332 static bool
40333 chain_already_loaded (rtx_insn *last)
40335 for (; last != NULL; last = PREV_INSN (last))
40337 if (NONJUMP_INSN_P (last))
40339 rtx patt = PATTERN (last);
40341 if (GET_CODE (patt) == SET)
40343 rtx lhs = XEXP (patt, 0);
40345 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
40346 return true;
40350 return false;
40353 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
40355 void
40356 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
40358 const bool direct_call_p
40359 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
40360 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
40361 rtx toc_load = NULL_RTX;
40362 rtx toc_restore = NULL_RTX;
40363 rtx func_addr;
40364 rtx abi_reg = NULL_RTX;
40365 rtx call[4];
40366 int n_call;
40367 rtx insn;
40369 /* Handle longcall attributes. */
40370 if (INTVAL (cookie) & CALL_LONG)
40371 func_desc = rs6000_longcall_ref (func_desc);
40373 /* Handle indirect calls. */
40374 if (GET_CODE (func_desc) != SYMBOL_REF
40375 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
40377 /* Save the TOC into its reserved slot before the call,
40378 and prepare to restore it after the call. */
40379 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
40380 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
40381 rtx stack_toc_mem = gen_frame_mem (Pmode,
40382 gen_rtx_PLUS (Pmode, stack_ptr,
40383 stack_toc_offset));
40384 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
40385 gen_rtvec (1, stack_toc_offset),
40386 UNSPEC_TOCSLOT);
40387 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
40389 /* Can we optimize saving the TOC in the prologue or
40390 do we need to do it at every call? */
40391 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
40392 cfun->machine->save_toc_in_prologue = true;
40393 else
40395 MEM_VOLATILE_P (stack_toc_mem) = 1;
40396 emit_move_insn (stack_toc_mem, toc_reg);
40399 if (DEFAULT_ABI == ABI_ELFv2)
40401 /* A function pointer in the ELFv2 ABI is just a plain address, but
40402 the ABI requires it to be loaded into r12 before the call. */
40403 func_addr = gen_rtx_REG (Pmode, 12);
40404 emit_move_insn (func_addr, func_desc);
40405 abi_reg = func_addr;
40407 else
40409 /* A function pointer under AIX is a pointer to a data area whose
40410 first word contains the actual address of the function, whose
40411 second word contains a pointer to its TOC, and whose third word
40412 contains a value to place in the static chain register (r11).
40413 Note that if we load the static chain, our "trampoline" need
40414 not have any executable code. */
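/* Concretely, for a 64-bit Pmode the code address is loaded from
   offset 0, the callee's TOC from offset 8, and the static chain
   from offset 16, matching the GET_MODE_SIZE (Pmode) multiples
   used below.  */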
40416 /* Load up address of the actual function. */
40417 func_desc = force_reg (Pmode, func_desc);
40418 func_addr = gen_reg_rtx (Pmode);
40419 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
40421 /* Prepare to load the TOC of the called function. Note that the
40422 TOC load must happen immediately before the actual call so
40423 that unwinding the TOC registers works correctly. See the
40424 comment in frob_update_context. */
40425 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
40426 rtx func_toc_mem = gen_rtx_MEM (Pmode,
40427 gen_rtx_PLUS (Pmode, func_desc,
40428 func_toc_offset));
40429 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
40431 /* If we have a static chain, load it up. But, if the call was
40432 originally direct, the 3rd word has not been written since no
40433 trampoline has been built, so we ought not to load it, lest we
40434 overwrite a static chain value. */
40435 if (!direct_call_p
40436 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
40437 && !chain_already_loaded (get_current_sequence ()->next->last))
40439 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
40440 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
40441 rtx func_sc_mem = gen_rtx_MEM (Pmode,
40442 gen_rtx_PLUS (Pmode, func_desc,
40443 func_sc_offset));
40444 emit_move_insn (sc_reg, func_sc_mem);
40445 abi_reg = sc_reg;
40449 else
40451 /* Direct calls use the TOC: for local calls, the callee will
40452 assume the TOC register is set; for non-local calls, the
40453 PLT stub needs the TOC register. */
40454 abi_reg = toc_reg;
40455 func_addr = func_desc;
40458 /* Create the call. */
40459 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
40460 if (value != NULL_RTX)
40461 call[0] = gen_rtx_SET (value, call[0]);
40462 n_call = 1;
40464 if (toc_load)
40465 call[n_call++] = toc_load;
40466 if (toc_restore)
40467 call[n_call++] = toc_restore;
40469 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
40471 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
40472 insn = emit_call_insn (insn);
40474 /* Mention all registers defined by the ABI to hold information
40475 as uses in CALL_INSN_FUNCTION_USAGE. */
40476 if (abi_reg)
40477 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
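/* For an indirect AIX call, the PARALLEL assembled above has,
   schematically, the shape

	(parallel [(set (reg value) (call (mem:SI func_addr) flag))
		   (use func_toc_mem)           <- toc_load
		   (set (reg 2) (unspec ...))   <- toc_restore
		   (clobber (reg lr))])

   with the toc_load and toc_restore arms omitted for direct calls, and
   the SET around the CALL omitted when there is no return value.  */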
40480 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
40482 void
40483 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
40485 rtx call[2];
40486 rtx insn;
40488 gcc_assert (INTVAL (cookie) == 0);
40490 /* Create the call. */
40491 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
40492 if (value != NULL_RTX)
40493 call[0] = gen_rtx_SET (value, call[0]);
40495 call[1] = simple_return_rtx;
40497 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
40498 insn = emit_call_insn (insn);
40500 /* Note use of the TOC register. */
40501 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
40504 /* Return whether we need to always update the saved TOC pointer when we update
40505 the stack pointer. */
40507 static bool
40508 rs6000_save_toc_in_prologue_p (void)
40510 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
40513 #ifdef HAVE_GAS_HIDDEN
40514 # define USE_HIDDEN_LINKONCE 1
40515 #else
40516 # define USE_HIDDEN_LINKONCE 0
40517 #endif
40519 /* Fills in the label name that should be used for a 476 link stack thunk. */
40521 void
40522 get_ppc476_thunk_name (char name[32])
40524 gcc_assert (TARGET_LINK_STACK);
40526 if (USE_HIDDEN_LINKONCE)
40527 sprintf (name, "__ppc476.get_thunk");
40528 else
40529 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
40532 /* This function emits the simple thunk routine that is used to preserve
40533 the link stack on the 476 cpu. */
40535 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
40536 static void
40537 rs6000_code_end (void)
40539 char name[32];
40540 tree decl;
40542 if (!TARGET_LINK_STACK)
40543 return;
40545 get_ppc476_thunk_name (name);
40547 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
40548 build_function_type_list (void_type_node, NULL_TREE));
40549 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
40550 NULL_TREE, void_type_node);
40551 TREE_PUBLIC (decl) = 1;
40552 TREE_STATIC (decl) = 1;
40554 #if RS6000_WEAK
40555 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
40557 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
40558 targetm.asm_out.unique_section (decl, 0);
40559 switch_to_section (get_named_section (decl, NULL, 0));
40560 DECL_WEAK (decl) = 1;
40561 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
40562 targetm.asm_out.globalize_label (asm_out_file, name);
40563 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
40564 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
40566 else
40567 #endif
40569 switch_to_section (text_section);
40570 ASM_OUTPUT_LABEL (asm_out_file, name);
40573 DECL_INITIAL (decl) = make_node (BLOCK);
40574 current_function_decl = decl;
40575 allocate_struct_function (decl, false);
40576 init_function_start (decl);
40577 first_function_block_is_cold = false;
40578 /* Make sure unwind info is emitted for the thunk if needed. */
40579 final_start_function (emit_barrier (), asm_out_file, 1);
40581 fputs ("\tblr\n", asm_out_file);
40583 final_end_function ();
40584 init_insn_lengths ();
40585 free_after_compilation (cfun);
40586 set_cfun (NULL);
40587 current_function_decl = NULL;
40590 /* Add r30 to hard reg set if the prologue sets it up and it is not
40591 pic_offset_table_rtx. */
40593 static void
40594 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
40596 if (!TARGET_SINGLE_PIC_BASE
40597 && TARGET_TOC
40598 && TARGET_MINIMAL_TOC
40599 && !constant_pool_empty_p ())
40600 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
40601 if (cfun->machine->split_stack_argp_used)
40602 add_to_hard_reg_set (&set->set, Pmode, 12);
40606 /* Helper function for rs6000_split_logical to emit a logical instruction after
40607 splitting the operation into individual GPR registers.
40609 DEST is the destination register.
40610 OP1 and OP2 are the input source registers.
40611 CODE is the base operation (AND, IOR, XOR, NOT).
40612 MODE is the machine mode.
40613 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40614 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40615 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
40617 static void
40618 rs6000_split_logical_inner (rtx dest,
40619 rtx op1,
40620 rtx op2,
40621 enum rtx_code code,
40622 machine_mode mode,
40623 bool complement_final_p,
40624 bool complement_op1_p,
40625 bool complement_op2_p)
40627 rtx bool_rtx;
40629 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
40630 if (op2 && GET_CODE (op2) == CONST_INT
40631 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
40632 && !complement_final_p && !complement_op1_p && !complement_op2_p)
40634 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
40635 HOST_WIDE_INT value = INTVAL (op2) & mask;
40637 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
40638 if (code == AND)
40640 if (value == 0)
40642 emit_insn (gen_rtx_SET (dest, const0_rtx));
40643 return;
40646 else if (value == mask)
40648 if (!rtx_equal_p (dest, op1))
40649 emit_insn (gen_rtx_SET (dest, op1));
40650 return;
40654 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
40655 into separate ORI/ORIS or XORI/XORIS instructions. */
40656 else if (code == IOR || code == XOR)
40658 if (value == 0)
40660 if (!rtx_equal_p (dest, op1))
40661 emit_insn (gen_rtx_SET (dest, op1));
40662 return;
40667 if (code == AND && mode == SImode
40668 && !complement_final_p && !complement_op1_p && !complement_op2_p)
40670 emit_insn (gen_andsi3 (dest, op1, op2));
40671 return;
40674 if (complement_op1_p)
40675 op1 = gen_rtx_NOT (mode, op1);
40677 if (complement_op2_p)
40678 op2 = gen_rtx_NOT (mode, op2);
40680 /* For canonical RTL, if only one arm is inverted it is the first. */
40681 if (!complement_op1_p && complement_op2_p)
40682 std::swap (op1, op2);
40684 bool_rtx = ((code == NOT)
40685 ? gen_rtx_NOT (mode, op1)
40686 : gen_rtx_fmt_ee (code, mode, op1, op2));
40688 if (complement_final_p)
40689 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
40691 emit_insn (gen_rtx_SET (dest, bool_rtx));
40694 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
40695 operations are split immediately during RTL generation to allow for more
40696 optimizations of the AND/IOR/XOR.
40698 OPERANDS is an array containing the destination and two input operands.
40699 CODE is the base operation (AND, IOR, XOR, NOT).
40700 MODE is the machine mode.
40701 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40702 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40703 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
40704 CLOBBER_REG is either NULL or a scratch register of type CC to allow
40705 formation of the AND instructions. */
40707 static void
40708 rs6000_split_logical_di (rtx operands[3],
40709 enum rtx_code code,
40710 bool complement_final_p,
40711 bool complement_op1_p,
40712 bool complement_op2_p)
40714 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
40715 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
40716 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
40717 enum hi_lo { hi = 0, lo = 1 };
40718 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
40719 size_t i;
40721 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
40722 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
40723 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
40724 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
40726 if (code == NOT)
40727 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
40728 else
40730 if (GET_CODE (operands[2]) != CONST_INT)
40732 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
40733 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
40735 else
40737 HOST_WIDE_INT value = INTVAL (operands[2]);
40738 HOST_WIDE_INT value_hi_lo[2];
40740 gcc_assert (!complement_final_p);
40741 gcc_assert (!complement_op1_p);
40742 gcc_assert (!complement_op2_p);
40744 value_hi_lo[hi] = value >> 32;
40745 value_hi_lo[lo] = value & lower_32bits;
40747 for (i = 0; i < 2; i++)
40749 HOST_WIDE_INT sub_value = value_hi_lo[i];
40751 if (sub_value & sign_bit)
40752 sub_value |= upper_32bits;
40754 op2_hi_lo[i] = GEN_INT (sub_value);
40756 /* If this is an AND instruction, check to see if we need to load
40757 the value in a register. */
40758 if (code == AND && sub_value != -1 && sub_value != 0
40759 && !and_operand (op2_hi_lo[i], SImode))
40760 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
40765 for (i = 0; i < 2; i++)
40767 /* Split large IOR/XOR operations. */
40768 if ((code == IOR || code == XOR)
40769 && GET_CODE (op2_hi_lo[i]) == CONST_INT
40770 && !complement_final_p
40771 && !complement_op1_p
40772 && !complement_op2_p
40773 && !logical_const_operand (op2_hi_lo[i], SImode))
40775 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
40776 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
40777 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
40778 rtx tmp = gen_reg_rtx (SImode);
40780 /* Make sure the constant is sign extended. */
40781 if ((hi_16bits & sign_bit) != 0)
40782 hi_16bits |= upper_32bits;
40784 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
40785 code, SImode, false, false, false);
40787 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
40788 code, SImode, false, false, false);
40790 else
40791 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
40792 code, SImode, complement_final_p,
40793 complement_op1_p, complement_op2_p);
40796 return;
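/* As an illustration of the constant split above (a hypothetical
   standalone helper, not part of the compiler): a 32-bit constant is
   broken into the two 16-bit pieces consumed by ORIS/ORI or XORIS/XORI,
   keeping the high piece sign-extended in a 64-bit host word exactly as
   rs6000_split_logical_di does.  */
static inline void
split_logical_const_example (long long value, long long *hi_16bits,
			     long long *lo_16bits)
{
  *hi_16bits = value & 0xffff0000LL;
  *lo_16bits = value & 0x0000ffffLL;
  if (*hi_16bits & 0x80000000LL)	/* sign bit of the 32-bit value */
    *hi_16bits |= ~0xffffffffLL;	/* sign-extend the high piece */
}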
40799 /* Split the insns that make up boolean operations operating on multiple GPR
40800 registers. The boolean MD patterns ensure that the inputs either are
40801 exactly the same as the output registers, or there is no overlap.
40803 OPERANDS is an array containing the destination and two input operands.
40804 CODE is the base operation (AND, IOR, XOR, NOT).
40805 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
40806 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
40807 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
40809 void
40810 rs6000_split_logical (rtx operands[3],
40811 enum rtx_code code,
40812 bool complement_final_p,
40813 bool complement_op1_p,
40814 bool complement_op2_p)
40816 machine_mode mode = GET_MODE (operands[0]);
40817 machine_mode sub_mode;
40818 rtx op0, op1, op2;
40819 int sub_size, regno0, regno1, nregs, i;
40821 /* If this is DImode, use the specialized version that can run before
40822 register allocation. */
40823 if (mode == DImode && !TARGET_POWERPC64)
40825 rs6000_split_logical_di (operands, code, complement_final_p,
40826 complement_op1_p, complement_op2_p);
40827 return;
40830 op0 = operands[0];
40831 op1 = operands[1];
40832 op2 = (code == NOT) ? NULL_RTX : operands[2];
40833 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
40834 sub_size = GET_MODE_SIZE (sub_mode);
40835 regno0 = REGNO (op0);
40836 regno1 = REGNO (op1);
40838 gcc_assert (reload_completed);
40839 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
40840 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
40842 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
40843 gcc_assert (nregs > 1);
40845 if (op2 && REG_P (op2))
40846 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
40848 for (i = 0; i < nregs; i++)
40850 int offset = i * sub_size;
40851 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
40852 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
40853 rtx sub_op2 = ((code == NOT)
40854 ? NULL_RTX
40855 : simplify_subreg (sub_mode, op2, mode, offset));
40857 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
40858 complement_final_p, complement_op1_p,
40859 complement_op2_p);
40862 return;
40866 /* Return true if the peephole2 can combine an addis instruction and a
40867 D-form load with an offset that allows the pair to be fused together on
40868 a power8. */
40870 bool
40871 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
40872 rtx addis_value, /* addis value. */
40873 rtx target, /* target register that is loaded. */
40874 rtx mem) /* bottom part of the memory addr. */
40876 rtx addr;
40877 rtx base_reg;
40879 /* Validate arguments. */
40880 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
40881 return false;
40883 if (!base_reg_operand (target, GET_MODE (target)))
40884 return false;
40886 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
40887 return false;
40889 /* Allow sign/zero extension. */
40890 if (GET_CODE (mem) == ZERO_EXTEND
40891 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
40892 mem = XEXP (mem, 0);
40894 if (!MEM_P (mem))
40895 return false;
40897 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
40898 return false;
40900 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
40901 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
40902 return false;
40904 /* Validate that the register used to load the high value is either the
40905 register being loaded, or we can safely replace its use.
40907 This function is only called from the peephole2 pass and we assume that
40908 there are 2 instructions in the peephole (addis and load), so we want to
40909 check that the target register is not used in the memory address and that
40910 the register holding the addis result is dead after the peephole. */
40911 if (REGNO (addis_reg) != REGNO (target))
40913 if (reg_mentioned_p (target, mem))
40914 return false;
40916 if (!peep2_reg_dead_p (2, addis_reg))
40917 return false;
40919 /* If the target register being loaded is the stack pointer, we must
40920 avoid loading any other value into it, even temporarily. */
40921 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
40922 return false;
40925 base_reg = XEXP (addr, 0);
40926 return REGNO (addis_reg) == REGNO (base_reg);
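/* For example (schematic; register and symbol names arbitrary), the
   insn pair accepted by fusion_gpr_load_p is

	addis 9,2,.LC0@toc@ha
	lwz 9,.LC0@toc@l(9)

   where the addis destination, the load's base register and the load's
   target register are all the same GPR.  */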
40929 /* During the peephole2 pass, adjust and expand the insns for a load fusion
40930 sequence. We adjust the addis register to use the target register. If the
40931 load sign-extends, we instead emit a zero-extending load and an explicit
40932 sign extension afterwards, since the fusion only covers zero-extending
40933 loads.
40935 The operands are:
40936 operands[0] register set with addis (to be replaced with target)
40937 operands[1] value set via addis
40938 operands[2] target register being loaded
40939 operands[3] D-form memory reference using operands[0]. */
40941 void
40942 expand_fusion_gpr_load (rtx *operands)
40944 rtx addis_value = operands[1];
40945 rtx target = operands[2];
40946 rtx orig_mem = operands[3];
40947 rtx new_addr, new_mem, orig_addr, offset;
40948 enum rtx_code plus_or_lo_sum;
40949 machine_mode target_mode = GET_MODE (target);
40950 machine_mode extend_mode = target_mode;
40951 machine_mode ptr_mode = Pmode;
40952 enum rtx_code extend = UNKNOWN;
40954 if (GET_CODE (orig_mem) == ZERO_EXTEND
40955 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
40957 extend = GET_CODE (orig_mem);
40958 orig_mem = XEXP (orig_mem, 0);
40959 target_mode = GET_MODE (orig_mem);
40962 gcc_assert (MEM_P (orig_mem));
40964 orig_addr = XEXP (orig_mem, 0);
40965 plus_or_lo_sum = GET_CODE (orig_addr);
40966 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
40968 offset = XEXP (orig_addr, 1);
40969 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
40970 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
40972 if (extend != UNKNOWN)
40973 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
40975 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
40976 UNSPEC_FUSION_GPR);
40977 emit_insn (gen_rtx_SET (target, new_mem));
40979 if (extend == SIGN_EXTEND)
40981 int sub_off = ((BYTES_BIG_ENDIAN)
40982 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
40983 : 0);
40984 rtx sign_reg
40985 = simplify_subreg (target_mode, target, extend_mode, sub_off);
40987 emit_insn (gen_rtx_SET (target,
40988 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
40991 return;
40994 /* Emit the addis instruction that will be part of a fused instruction
40995 sequence. */
40997 void
40998 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
40999 const char *mode_name)
41001 rtx fuse_ops[10];
41002 char insn_template[80];
41003 const char *addis_str = NULL;
41004 const char *comment_str = ASM_COMMENT_START;
41006 if (*comment_str == ' ')
41007 comment_str++;
41009 /* Emit the addis instruction. */
41010 fuse_ops[0] = target;
41011 if (satisfies_constraint_L (addis_value))
41013 fuse_ops[1] = addis_value;
41014 addis_str = "lis %0,%v1";
41017 else if (GET_CODE (addis_value) == PLUS)
41019 rtx op0 = XEXP (addis_value, 0);
41020 rtx op1 = XEXP (addis_value, 1);
41022 if (REG_P (op0) && CONST_INT_P (op1)
41023 && satisfies_constraint_L (op1))
41025 fuse_ops[1] = op0;
41026 fuse_ops[2] = op1;
41027 addis_str = "addis %0,%1,%v2";
41031 else if (GET_CODE (addis_value) == HIGH)
41033 rtx value = XEXP (addis_value, 0);
41034 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
41036 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
41037 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
41038 if (TARGET_ELF)
41039 addis_str = "addis %0,%2,%1@toc@ha";
41041 else if (TARGET_XCOFF)
41042 addis_str = "addis %0,%1@u(%2)";
41044 else
41045 gcc_unreachable ();
41048 else if (GET_CODE (value) == PLUS)
41050 rtx op0 = XEXP (value, 0);
41051 rtx op1 = XEXP (value, 1);
41053 if (GET_CODE (op0) == UNSPEC
41054 && XINT (op0, 1) == UNSPEC_TOCREL
41055 && CONST_INT_P (op1))
41057 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
41058 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
41059 fuse_ops[3] = op1;
41060 if (TARGET_ELF)
41061 addis_str = "addis %0,%2,%1+%3@toc@ha";
41063 else if (TARGET_XCOFF)
41064 addis_str = "addis %0,%1+%3@u(%2)";
41066 else
41067 gcc_unreachable ();
41071 else if (satisfies_constraint_L (value))
41073 fuse_ops[1] = value;
41074 addis_str = "lis %0,%v1";
41077 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
41079 fuse_ops[1] = value;
41080 addis_str = "lis %0,%1@ha";
41084 if (!addis_str)
41085 fatal_insn ("Could not generate addis value for fusion", addis_value);
41087 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
41088 comment, mode_name);
41089 output_asm_insn (insn_template, fuse_ops);
41092 /* Emit a D-form load or store instruction that is the second instruction
41093 of a fusion sequence. */
41095 void
41096 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
41097 const char *insn_str)
41099 rtx fuse_ops[10];
41100 char insn_template[80];
41102 fuse_ops[0] = load_store_reg;
41103 fuse_ops[1] = addis_reg;
41105 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
41107 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
41108 fuse_ops[2] = offset;
41109 output_asm_insn (insn_template, fuse_ops);
41112 else if (GET_CODE (offset) == UNSPEC
41113 && XINT (offset, 1) == UNSPEC_TOCREL)
41115 if (TARGET_ELF)
41116 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
41118 else if (TARGET_XCOFF)
41119 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
41121 else
41122 gcc_unreachable ();
41124 fuse_ops[2] = XVECEXP (offset, 0, 0);
41125 output_asm_insn (insn_template, fuse_ops);
41128 else if (GET_CODE (offset) == PLUS
41129 && GET_CODE (XEXP (offset, 0)) == UNSPEC
41130 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
41131 && CONST_INT_P (XEXP (offset, 1)))
41133 rtx tocrel_unspec = XEXP (offset, 0);
41134 if (TARGET_ELF)
41135 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
41137 else if (TARGET_XCOFF)
41138 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
41140 else
41141 gcc_unreachable ();
41143 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
41144 fuse_ops[3] = XEXP (offset, 1);
41145 output_asm_insn (insn_template, fuse_ops);
41148 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
41150 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
41152 fuse_ops[2] = offset;
41153 output_asm_insn (insn_template, fuse_ops);
41156 else
41157 fatal_insn ("Unable to generate load/store offset for fusion", offset);
41159 return;
41162 /* Wrap a TOC address that can be fused to indicate that special fusion
41163 processing is needed. */
41165 static rtx
41166 fusion_wrap_memory_address (rtx old_mem)
41168 rtx old_addr = XEXP (old_mem, 0);
41169 rtvec v = gen_rtvec (1, old_addr);
41170 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
41171 return replace_equiv_address_nv (old_mem, new_addr, false);
41174 /* Given an address, convert it into the addis and load offset parts. Addresses
41175 created during the peephole2 process look like:
41176 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
41177 (unspec [(...)] UNSPEC_TOCREL))
41179 Addresses created via toc fusion look like:
41180 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
41182 static void
41183 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
41185 rtx hi, lo;
41187 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
41189 lo = XVECEXP (addr, 0, 0);
41190 hi = gen_rtx_HIGH (Pmode, lo);
41192 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
41194 hi = XEXP (addr, 0);
41195 lo = XEXP (addr, 1);
41197 else
41198 gcc_unreachable ();
41200 *p_hi = hi;
41201 *p_lo = lo;
41204 /* Return a string to fuse an addis instruction with a gpr load into the
41205 same register that the addis instruction set. The address that is used
41206 is the logical address that was formed during peephole2:
41207 (lo_sum (high) (low-part))
41209 Or the address is the TOC address that is wrapped before register allocation:
41210 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
41212 The code is complicated, so we call output_asm_insn directly, and just
41213 return "". */
41215 const char *
41216 emit_fusion_gpr_load (rtx target, rtx mem)
41218 rtx addis_value;
41219 rtx addr;
41220 rtx load_offset;
41221 const char *load_str = NULL;
41222 const char *mode_name = NULL;
41223 machine_mode mode;
41225 if (GET_CODE (mem) == ZERO_EXTEND)
41226 mem = XEXP (mem, 0);
41228 gcc_assert (REG_P (target) && MEM_P (mem));
41230 addr = XEXP (mem, 0);
41231 fusion_split_address (addr, &addis_value, &load_offset);
41233 /* Now emit the load instruction to the same register. */
41234 mode = GET_MODE (mem);
41235 switch (mode)
41237 case QImode:
41238 mode_name = "char";
41239 load_str = "lbz";
41240 break;
41242 case HImode:
41243 mode_name = "short";
41244 load_str = "lhz";
41245 break;
41247 case SImode:
41248 case SFmode:
41249 mode_name = (mode == SFmode) ? "float" : "int";
41250 load_str = "lwz";
41251 break;
41253 case DImode:
41254 case DFmode:
41255 gcc_assert (TARGET_POWERPC64);
41256 mode_name = (mode == DFmode) ? "double" : "long";
41257 load_str = "ld";
41258 break;
41260 default:
41261 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
41264 /* Emit the addis instruction. */
41265 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
41267 /* Emit the D-form load instruction. */
41268 emit_fusion_load_store (target, target, load_offset, load_str);
41270 return "";
41274 /* Return true if the peephole2 can combine a load/store involving a
41275 combination of an addis instruction and the memory operation. This form
41276 of fusion was introduced with the ISA 3.0 (power9) hardware. */
41278 bool
41279 fusion_p9_p (rtx addis_reg, /* register set via addis. */
41280 rtx addis_value, /* addis value. */
41281 rtx dest, /* destination (memory or register). */
41282 rtx src) /* source (register or memory). */
41284 rtx addr, mem, offset;
41285 machine_mode mode = GET_MODE (src);
41287 /* Validate arguments. */
41288 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
41289 return false;
41291 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
41292 return false;
41294 /* Ignore extend operations that are part of the load. */
41295 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
41296 src = XEXP (src, 0);
41298 /* Test for memory<-register or register<-memory. */
41299 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
41301 if (!MEM_P (dest))
41302 return false;
41304 mem = dest;
41307 else if (MEM_P (src))
41309 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
41310 return false;
41312 mem = src;
41315 else
41316 return false;
41318 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
41319 if (GET_CODE (addr) == PLUS)
41321 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
41322 return false;
41324 return satisfies_constraint_I (XEXP (addr, 1));
41327 else if (GET_CODE (addr) == LO_SUM)
41329 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
41330 return false;
41332 offset = XEXP (addr, 1);
41333 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
41334 return small_toc_ref (offset, GET_MODE (offset));
41336 else if (TARGET_ELF && !TARGET_POWERPC64)
41337 return CONSTANT_P (offset);
41340 return false;
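/* For example (schematic; register numbers arbitrary), ISA 3.0 can fuse

	addis 11,2,.LC0@toc@ha
	lfd 0,.LC0@toc@l(11)

   where, unlike the power8 form, the destination need not equal the
   addis register; the callers additionally require that the addis
   register die after the pair.  */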
41343 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
41344 load sequence.
41346 The operands are:
41347 operands[0] register set with addis
41348 operands[1] value set via addis
41349 operands[2] target register being loaded
41350 operands[3] D-form memory reference using operands[0].
41352 This is similar to the fusion introduced with power8, except it scales to
41353 both loads and stores and does not require the result register to be the
41354 same as the base register. At the moment, we only do this if the register
41355 set via the addis is dead. */
41357 void
41358 expand_fusion_p9_load (rtx *operands)
41360 rtx tmp_reg = operands[0];
41361 rtx addis_value = operands[1];
41362 rtx target = operands[2];
41363 rtx orig_mem = operands[3];
41364 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
41365 enum rtx_code plus_or_lo_sum;
41366 machine_mode target_mode = GET_MODE (target);
41367 machine_mode extend_mode = target_mode;
41368 machine_mode ptr_mode = Pmode;
41369 enum rtx_code extend = UNKNOWN;
41371 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
41373 extend = GET_CODE (orig_mem);
41374 orig_mem = XEXP (orig_mem, 0);
41375 target_mode = GET_MODE (orig_mem);
41378 gcc_assert (MEM_P (orig_mem));
41380 orig_addr = XEXP (orig_mem, 0);
41381 plus_or_lo_sum = GET_CODE (orig_addr);
41382 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
41384 offset = XEXP (orig_addr, 1);
41385 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
41386 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
41388 if (extend != UNKNOWN)
41389 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
41391 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
41392 UNSPEC_FUSION_P9);
41394 set = gen_rtx_SET (target, new_mem);
41395 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
41396 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
41397 emit_insn (insn);
41399 return;
41402 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
41403 store sequence.
41405 The operands are:
41406 operands[0] register set with addis
41407 operands[1] value set via addis
41408 operands[2] target D-form memory being stored to
41409 operands[3] register being stored
41411 This is similar to the fusion introduced with power8, except it scales to
41412 both loads and stores and does not require the result register to be the
41413 same as the base register. At the moment, we only do this if the register
41414 set via the addis is dead. */
41416 void
41417 expand_fusion_p9_store (rtx *operands)
41419 rtx tmp_reg = operands[0];
41420 rtx addis_value = operands[1];
41421 rtx orig_mem = operands[2];
41422 rtx src = operands[3];
41423 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
41424 enum rtx_code plus_or_lo_sum;
41425 machine_mode target_mode = GET_MODE (orig_mem);
41426 machine_mode ptr_mode = Pmode;
41428 gcc_assert (MEM_P (orig_mem));
41430 orig_addr = XEXP (orig_mem, 0);
41431 plus_or_lo_sum = GET_CODE (orig_addr);
41432 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
41434 offset = XEXP (orig_addr, 1);
41435 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
41436 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
41438 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
41439 UNSPEC_FUSION_P9);
41441 set = gen_rtx_SET (new_mem, new_src);
41442 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
41443 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
41444 emit_insn (insn);
41446 return;
41449 /* Return a string to fuse an addis instruction with a load using extended
41450 fusion. The address that is used is the logical address that was formed
41451 during peephole2: (lo_sum (high) (low-part))
41453 The code is complicated, so we call output_asm_insn directly, and just
41454 return "". */
41456 const char *
41457 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
41459 machine_mode mode = GET_MODE (reg);
41460 rtx hi;
41461 rtx lo;
41462 rtx addr;
41463 const char *load_string;
41464 int r;
41466 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
41468 mem = XEXP (mem, 0);
41469 mode = GET_MODE (mem);
41472 if (GET_CODE (reg) == SUBREG)
41474 gcc_assert (SUBREG_BYTE (reg) == 0);
41475 reg = SUBREG_REG (reg);
41478 if (!REG_P (reg))
41479 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
41481 r = REGNO (reg);
41482 if (FP_REGNO_P (r))
41484 if (mode == SFmode)
41485 load_string = "lfs";
41486 else if (mode == DFmode || mode == DImode)
41487 load_string = "lfd";
41488 else
41489 gcc_unreachable ();
41491 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
41493 if (mode == SFmode)
41494 load_string = "lxssp";
41495 else if (mode == DFmode || mode == DImode)
41496 load_string = "lxsd";
41497 else
41498 gcc_unreachable ();
41500 else if (INT_REGNO_P (r))
41502 switch (mode)
41504 case QImode:
41505 load_string = "lbz";
41506 break;
41507 case HImode:
41508 load_string = "lhz";
41509 break;
41510 case SImode:
41511 case SFmode:
41512 load_string = "lwz";
41513 break;
41514 case DImode:
41515 case DFmode:
41516 if (!TARGET_POWERPC64)
41517 gcc_unreachable ();
41518 load_string = "ld";
41519 break;
41520 default:
41521 gcc_unreachable ();
41524 else
41525 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
41527 if (!MEM_P (mem))
41528 fatal_insn ("emit_fusion_p9_load not MEM", mem);
41530 addr = XEXP (mem, 0);
41531 fusion_split_address (addr, &hi, &lo);
41533 /* Emit the addis instruction. */
41534 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
41536 /* Emit the D-form load instruction. */
41537 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
41539 return "";
41542 /* Return a string to fuse an addis instruction with a store using extended
41543 fusion. The address that is used is the logical address that was formed
41544 during peephole2: (lo_sum (high) (low-part))
41546 The code is complicated, so we call output_asm_insn directly, and just
41547 return "". */
41549 const char *
41550 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
41552 machine_mode mode = GET_MODE (reg);
41553 rtx hi;
41554 rtx lo;
41555 rtx addr;
41556 const char *store_string;
41557 int r;
41559 if (GET_CODE (reg) == SUBREG)
41561 gcc_assert (SUBREG_BYTE (reg) == 0);
41562 reg = SUBREG_REG (reg);
41565 if (!REG_P (reg))
41566 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
41568 r = REGNO (reg);
41569 if (FP_REGNO_P (r))
41571 if (mode == SFmode)
41572 store_string = "stfs";
41573 else if (mode == DFmode)
41574 store_string = "stfd";
41575 else
41576 gcc_unreachable ();
41578 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
41580 if (mode == SFmode)
41581 store_string = "stxssp";
41582 else if (mode == DFmode || mode == DImode)
41583 store_string = "stxsd";
41584 else
41585 gcc_unreachable ();
41587 else if (INT_REGNO_P (r))
41589 switch (mode)
41591 case QImode:
41592 store_string = "stb";
41593 break;
41594 case HImode:
41595 store_string = "sth";
41596 break;
41597 case SImode:
41598 case SFmode:
41599 store_string = "stw";
41600 break;
41601 case DImode:
41602 case DFmode:
41603 if (!TARGET_POWERPC64)
41604 gcc_unreachable ();
41605 store_string = "std";
41606 break;
41607 default:
41608 gcc_unreachable ();
41611 else
41612 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
41614 if (!MEM_P (mem))
41615 fatal_insn ("emit_fusion_p9_store not MEM", mem);
41617 addr = XEXP (mem, 0);
41618 fusion_split_address (addr, &hi, &lo);
41620 /* Emit the addis instruction. */
41621 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
41623 /* Emit the D-form store instruction. */
41624 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
41626 return "";
41630 /* Analyze vector computations and remove unnecessary doubleword
41631 swaps (xxswapdi instructions). This pass is performed only
41632 for little-endian VSX code generation.
41634 For this specific case, loads and stores of 4x32 and 2x64 vectors
41635 are inefficient. These are implemented using the lvxd2x and
41636 stvxd2x instructions, which invert the order of doublewords in
41637 a vector register. Thus the code generation inserts an xxswapdi
41638 after each such load, and prior to each such store. (For spill
41639 code after register assignment, an additional xxswapdi is inserted
41640 following each store in order to return a hard register to its
41641 unpermuted value.)
41643 The extra xxswapdi instructions reduce performance. This can be
41644 particularly bad for vectorized code. The purpose of this pass
41645 is to reduce the number of xxswapdi instructions required for
41646 correctness.
41648 The primary insight is that much code that operates on vectors
41649 does not care about the relative order of elements in a register,
41650 so long as the correct memory order is preserved. If we have
41651 a computation where all input values are provided by lvxd2x/xxswapdi
41652 sequences, all outputs are stored using xxswapdi/stvxd2x sequences,
41653 and all intermediate computations are pure SIMD (independent of
41654 element order), then all the xxswapdi's associated with the loads
41655 and stores may be removed.
41657 This pass uses some of the infrastructure and logical ideas from
41658 the "web" pass in web.c. We create maximal webs of computations
41659 fitting the description above using union-find. Each such web is
41660 then optimized by removing its unnecessary xxswapdi instructions.
41662 The pass is placed prior to global optimization so that we can
41663 perform the optimization in the safest and simplest way possible;
41664 that is, by replacing each xxswapdi insn with a register copy insn.
41665 Subsequent forward propagation will remove copies where possible.
41667 There are some operations sensitive to element order for which we
41668 can still allow the operation, provided we modify those operations.
41669 These include CONST_VECTORs, for which we must swap the first and
41670 second halves of the constant vector; and SUBREGs, for which we
41671 must adjust the byte offset to account for the swapped doublewords.
41672 A remaining opportunity would be non-immediate-form splats, for
41673 which we should adjust the selected lane of the input. We should
41674 also make code generation adjustments for sum-across operations,
41675 since this is a common vectorizer reduction.
41677 Because we run prior to the first split, we can see loads and stores
41678 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
41679 vector loads and stores that have not yet been split into a permuting
41680 load/store and a swap. (One way this can happen is with a builtin
41681 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
41682 than deleting a swap, we convert the load/store into a permuting
41683 load/store (which effectively removes the swap). */
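/* For example, a simple little-endian vector copy

	v = *p;
	*q = v;

   is emitted before this pass, schematically, as

	lvxd2x   vx,p      load with doublewords reversed
	xxswapdi vx,vx     restore element order
	xxswapdi vx,vx     re-reverse ahead of the store
	stvxd2x  vx,q      store with doublewords reversed

   and this pass replaces both xxswapdi's with register copies that
   forward propagation can then delete.  */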
41685 /* Notes on Permutes
41687 We do not currently handle computations that contain permutes. There
41688 is a general transformation that can be performed correctly, but it
41689 may introduce more expensive code than it replaces. To handle these
41690 would require a cost model to determine when to perform the optimization.
41691 This commentary records how this could be done if desired.
41693 The most general permute is something like this (example for V16QI):
41695 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
41696 (parallel [(const_int a0) (const_int a1)
41697 ...
41698 (const_int a14) (const_int a15)]))
41700 where a0,...,a15 are in [0,31] and select elements from op1 and op2
41701 to appear in the result.
41703 Regardless of mode, we can convert the PARALLEL to a mask of 16
41704 byte-element selectors. Let's call this M, with M[i] representing
41705 the ith byte-element selector value. Then if we swap doublewords
41706 throughout the computation, we can get correct behavior by replacing
41707 M with M' as follows:
41709 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
41710 { ((M[i]+8)%16)+16 : M[i] in [16,31]
41712 This seems promising at first, since we are just replacing one mask
41713 with another. But certain masks are preferable to others. If M
41714 is a mask that matches a vmrghh pattern, for example, M' certainly
41715 will not. Instead of a single vmrghh, we would generate a load of
41716 M' and a vperm. So we would need to know how many xxswapd's we can
41717 remove as a result of this transformation to determine if it's
41718 profitable; and preferably the logic would need to be aware of all
41719 the special preferable masks.
41721 Another form of permute is an UNSPEC_VPERM, in which the mask is
41722 already in a register. In some cases, this mask may be a constant
41723 that we can discover with ud-chains, in which case the above
41724 transformation is ok. However, the common usage here is for the
41725 mask to be produced by an UNSPEC_LVSL, in which case the mask
41726 cannot be known at compile time. In such a case we would have to
41727 generate several instructions to compute M' as above at run time,
41728 and a cost model is needed again.
41730 However, when the mask M for an UNSPEC_VPERM is loaded from the
41731 constant pool, we can replace M with M' as above at no cost
41732 beyond adding a constant pool entry. */
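/* As a concrete illustration of the M -> M' rewrite above (a
   hypothetical standalone helper, not used anywhere in the compiler),
   adjusting the 16 byte-element selectors of a V16QI permute:  */
static inline void
swap_permute_mask_example (const unsigned char m[16],
			   unsigned char m_prime[16])
{
  for (int i = 0; i < 16; i++)
    /* Selectors in [0,15] pick from op1, those in [16,31] from op2;
       either way the doubleword halves are exchanged by adding 8
       modulo 16.  */
    m_prime[i] = (m[i] < 16
		  ? (m[i] + 8) % 16
		  : ((m[i] + 8) % 16) + 16);
}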
41734 /* This is based on the union-find logic in web.c. web_entry_base is
41735 defined in df.h. */
41736 class swap_web_entry : public web_entry_base
41738 public:
41739 /* Pointer to the insn. */
41740 rtx_insn *insn;
41741 /* Set if insn contains a mention of a vector register. All other
41742 fields are undefined if this field is unset. */
41743 unsigned int is_relevant : 1;
41744 /* Set if insn is a load. */
41745 unsigned int is_load : 1;
41746 /* Set if insn is a store. */
41747 unsigned int is_store : 1;
41748 /* Set if insn is a doubleword swap. This can either be a register swap
41749 or a permuting load or store (test is_load and is_store for this). */
41750 unsigned int is_swap : 1;
41751 /* Set if the insn has a live-in use of a parameter register. */
41752 unsigned int is_live_in : 1;
41753 /* Set if the insn has a live-out def of a return register. */
41754 unsigned int is_live_out : 1;
41755 /* Set if the insn contains a subreg reference of a vector register. */
41756 unsigned int contains_subreg : 1;
41757 /* Set if the insn contains a 128-bit integer operand. */
41758 unsigned int is_128_int : 1;
41759 /* Set if this is a call-insn. */
41760 unsigned int is_call : 1;
41761 /* Set if this insn does not perform a vector operation for which
41762 element order matters, or if we know how to fix it up if it does.
41763 Undefined if is_swap is set. */
41764 unsigned int is_swappable : 1;
41765 /* A nonzero value indicates what kind of special handling for this
41766 insn is required if doublewords are swapped. Undefined if
41767 is_swappable is not set. */
41768 unsigned int special_handling : 4;
41769 /* Set if the web represented by this entry cannot be optimized. */
41770 unsigned int web_not_optimizable : 1;
41771 /* Set if this insn should be deleted. */
41772 unsigned int will_delete : 1;
41775 enum special_handling_values {
41776 SH_NONE = 0,
41777 SH_CONST_VECTOR,
41778 SH_SUBREG,
41779 SH_NOSWAP_LD,
41780 SH_NOSWAP_ST,
41781 SH_EXTRACT,
41782 SH_SPLAT,
41783 SH_XXPERMDI,
41784 SH_CONCAT,
41785 SH_VPERM
41788 /* Union INSN with all insns containing definitions that reach USE.
41789 Detect whether USE is live-in to the current function. */
41790 static void
41791 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
41793 struct df_link *link = DF_REF_CHAIN (use);
41795 if (!link)
41796 insn_entry[INSN_UID (insn)].is_live_in = 1;
41798 while (link)
41800 if (DF_REF_IS_ARTIFICIAL (link->ref))
41801 insn_entry[INSN_UID (insn)].is_live_in = 1;
41803 if (DF_REF_INSN_INFO (link->ref))
41805 rtx def_insn = DF_REF_INSN (link->ref);
41806 (void)unionfind_union (insn_entry + INSN_UID (insn),
41807 insn_entry + INSN_UID (def_insn));
41810 link = link->next;
41814 /* Union INSN with all insns containing uses reached from DEF.
41815 Detect whether DEF is live-out from the current function. */
41816 static void
41817 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
41819 struct df_link *link = DF_REF_CHAIN (def);
41821 if (!link)
41822 insn_entry[INSN_UID (insn)].is_live_out = 1;
41824 while (link)
41826 /* This could be an eh use or some other artificial use;
41827 we treat these all the same (killing the optimization). */
41828 if (DF_REF_IS_ARTIFICIAL (link->ref))
41829 insn_entry[INSN_UID (insn)].is_live_out = 1;
41831 if (DF_REF_INSN_INFO (link->ref))
41833 rtx use_insn = DF_REF_INSN (link->ref);
41834 (void)unionfind_union (insn_entry + INSN_UID (insn),
41835 insn_entry + INSN_UID (use_insn));
41838 link = link->next;
41842 /* Return 1 iff INSN is a load insn, including permuting loads that
41843 represent an lvxd2x instruction; else return 0. */
41844 static unsigned int
41845 insn_is_load_p (rtx insn)
41847 rtx body = PATTERN (insn);
41849 if (GET_CODE (body) == SET)
41851 if (GET_CODE (SET_SRC (body)) == MEM)
41852 return 1;
41854 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
41855 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
41856 return 1;
41858 return 0;
41861 if (GET_CODE (body) != PARALLEL)
41862 return 0;
41864 rtx set = XVECEXP (body, 0, 0);
41866 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
41867 return 1;
41869 return 0;
41872 /* Return 1 iff INSN is a store insn, including permuting stores that
41873 represent an stvxd2x instruction; else return 0. */
41874 static unsigned int
41875 insn_is_store_p (rtx insn)
41877 rtx body = PATTERN (insn);
41878 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
41879 return 1;
41880 if (GET_CODE (body) != PARALLEL)
41881 return 0;
41882 rtx set = XVECEXP (body, 0, 0);
41883 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
41884 return 1;
41885 return 0;
41888 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
41889 a permuting load, or a permuting store. */
41890 static unsigned int
41891 insn_is_swap_p (rtx insn)
41893 rtx body = PATTERN (insn);
41894 if (GET_CODE (body) != SET)
41895 return 0;
41896 rtx rhs = SET_SRC (body);
41897 if (GET_CODE (rhs) != VEC_SELECT)
41898 return 0;
41899 rtx parallel = XEXP (rhs, 1);
41900 if (GET_CODE (parallel) != PARALLEL)
41901 return 0;
41902 unsigned int len = XVECLEN (parallel, 0);
41903 if (len != 2 && len != 4 && len != 8 && len != 16)
41904 return 0;
41905 for (unsigned int i = 0; i < len / 2; ++i)
41907 rtx op = XVECEXP (parallel, 0, i);
41908 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
41909 return 0;
41911 for (unsigned int i = len / 2; i < len; ++i)
41913 rtx op = XVECEXP (parallel, 0, i);
41914 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
41915 return 0;
41917 return 1;
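/* For example, the V4SI form of the doubleword swap recognized above is

	(set (reg:V4SI d)
	     (vec_select:V4SI (reg:V4SI s)
			      (parallel [(const_int 2) (const_int 3)
					 (const_int 0) (const_int 1)])))

   i.e. the two halves of the selector are exchanged.  */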
41920 /* Return TRUE if insn is a swap fed by a load from the constant pool. */
41921 static bool
41922 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
41924 unsigned uid = INSN_UID (insn);
41925 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
41926 return false;
41928 /* Find the unique use in the swap and locate its def. If the def
41929 isn't unique, punt. */
41930 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41931 df_ref use;
41932 FOR_EACH_INSN_INFO_USE (use, insn_info)
41934 struct df_link *def_link = DF_REF_CHAIN (use);
41935 if (!def_link || def_link->next)
41936 return false;
41938 rtx def_insn = DF_REF_INSN (def_link->ref);
41939 unsigned uid2 = INSN_UID (def_insn);
41940 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
41941 return false;
41943 rtx body = PATTERN (def_insn);
41944 if (GET_CODE (body) != SET
41945 || GET_CODE (SET_SRC (body)) != VEC_SELECT
41946 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
41947 return false;
41949 rtx mem = XEXP (SET_SRC (body), 0);
41950 rtx base_reg = XEXP (mem, 0);
41952 df_ref base_use;
41953 insn_info = DF_INSN_INFO_GET (def_insn);
41954 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
41956 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
41957 continue;
41959 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
41960 if (!base_def_link || base_def_link->next)
41961 return false;
41963 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
41964 rtx tocrel_body = PATTERN (tocrel_insn);
41965 rtx base, offset;
41966 if (GET_CODE (tocrel_body) != SET)
41967 return false;
41968 /* There is an extra level of indirection for small/large
41969 code models. */
41970 rtx tocrel_expr = SET_SRC (tocrel_body);
41971 if (GET_CODE (tocrel_expr) == MEM)
41972 tocrel_expr = XEXP (tocrel_expr, 0);
41973 if (!toc_relative_expr_p (tocrel_expr, false))
41974 return false;
41975 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
41976 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
41977 return false;
41980 return true;
41983 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
41984 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
41985 static bool
41986 v2df_reduction_p (rtx op)
41988 if (GET_MODE (op) != V2DFmode)
41989 return false;
41991 enum rtx_code code = GET_CODE (op);
41992 if (code != PLUS && code != SMIN && code != SMAX)
41993 return false;
41995 rtx concat = XEXP (op, 0);
41996 if (GET_CODE (concat) != VEC_CONCAT)
41997 return false;
41999 rtx select0 = XEXP (concat, 0);
42000 rtx select1 = XEXP (concat, 1);
42001 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
42002 return false;
42004 rtx reg0 = XEXP (select0, 0);
42005 rtx reg1 = XEXP (select1, 0);
42006 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
42007 return false;
42009 rtx parallel0 = XEXP (select0, 1);
42010 rtx parallel1 = XEXP (select1, 1);
42011 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
42012 return false;
42014 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
42015 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
42016 return false;
42018 return true;
42021 /* Return 1 iff OP is an operand that will not be affected by having
42022 vector doublewords swapped in memory. */
42023 static unsigned int
42024 rtx_is_swappable_p (rtx op, unsigned int *special)
42026 enum rtx_code code = GET_CODE (op);
42027 int i, j;
42028 rtx parallel;
42030 switch (code)
42032 case LABEL_REF:
42033 case SYMBOL_REF:
42034 case CLOBBER:
42035 case REG:
42036 return 1;
42038 case VEC_CONCAT:
42039 case ASM_INPUT:
42040 case ASM_OPERANDS:
42041 return 0;
42043 case CONST_VECTOR:
42045 *special = SH_CONST_VECTOR;
42046 return 1;
42049 case VEC_DUPLICATE:
42050 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
42051 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
42052 it represents a vector splat for which we can do special
42053 handling. */
42054 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
42055 return 1;
42056 else if (REG_P (XEXP (op, 0))
42057 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
42058 /* This catches V2DF and V2DI splat, at a minimum. */
42059 return 1;
42060 else if (GET_CODE (XEXP (op, 0)) == TRUNCATE
42061 && REG_P (XEXP (XEXP (op, 0), 0))
42062 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
42063 /* This catches splat of a truncated value. */
42064 return 1;
42065 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
42066 /* If the duplicated item is from a select, defer to the select
42067 processing to see if we can change the lane for the splat. */
42068 return rtx_is_swappable_p (XEXP (op, 0), special);
42069 else
42070 return 0;
42072 case VEC_SELECT:
42073 /* A vec_extract operation is ok if we change the lane. */
42074 if (GET_CODE (XEXP (op, 0)) == REG
42075 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
42076 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
42077 && XVECLEN (parallel, 0) == 1
42078 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
42080 *special = SH_EXTRACT;
42081 return 1;
42083 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
42084 XXPERMDI is a swap operation, it will be identified by
42085 insn_is_swap_p and therefore we won't get here. */
42086 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
42087 && (GET_MODE (XEXP (op, 0)) == V4DFmode
42088 || GET_MODE (XEXP (op, 0)) == V4DImode)
42089 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
42090 && XVECLEN (parallel, 0) == 2
42091 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
42092 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
42094 *special = SH_XXPERMDI;
42095 return 1;
42097 else if (v2df_reduction_p (op))
42098 return 1;
42099 else
42100 return 0;
42102 case UNSPEC:
42104 /* Various operations are unsafe for this optimization, at least
42105 without significant additional work. Permutes are obviously
42106 problematic, as both the permute control vector and the ordering
42107 of the target values are invalidated by doubleword swapping.
42108 Vector pack and unpack modify the number of vector lanes.
42109 Merge-high/low will not operate correctly on swapped operands.
42110 Vector shifts across element boundaries are clearly uncool,
42111 as are vector select and concatenate operations. Vector
42112 sum-across instructions define one operand with a specific
42113 order-dependent element, so additional fixup code would be
42114 needed to make those work. Vector set and non-immediate-form
42115 vector splat are element-order sensitive. A few of these
42116 cases might be workable with special handling if required.
42117 Adding cost modeling would be appropriate in some cases. */
42118 int val = XINT (op, 1);
42119 switch (val)
42121 default:
42122 break;
42123 case UNSPEC_VMRGH_DIRECT:
42124 case UNSPEC_VMRGL_DIRECT:
42125 case UNSPEC_VPACK_SIGN_SIGN_SAT:
42126 case UNSPEC_VPACK_SIGN_UNS_SAT:
42127 case UNSPEC_VPACK_UNS_UNS_MOD:
42128 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
42129 case UNSPEC_VPACK_UNS_UNS_SAT:
42130 case UNSPEC_VPERM:
42131 case UNSPEC_VPERM_UNS:
42132 case UNSPEC_VPERMHI:
42133 case UNSPEC_VPERMSI:
42134 case UNSPEC_VPKPX:
42135 case UNSPEC_VSLDOI:
42136 case UNSPEC_VSLO:
42137 case UNSPEC_VSRO:
42138 case UNSPEC_VSUM2SWS:
42139 case UNSPEC_VSUM4S:
42140 case UNSPEC_VSUM4UBS:
42141 case UNSPEC_VSUMSWS:
42142 case UNSPEC_VSUMSWS_DIRECT:
42143 case UNSPEC_VSX_CONCAT:
42144 case UNSPEC_VSX_SET:
42145 case UNSPEC_VSX_SLDWI:
42146 case UNSPEC_VUNPACK_HI_SIGN:
42147 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
42148 case UNSPEC_VUNPACK_LO_SIGN:
42149 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
42150 case UNSPEC_VUPKHPX:
42151 case UNSPEC_VUPKHS_V4SF:
42152 case UNSPEC_VUPKHU_V4SF:
42153 case UNSPEC_VUPKLPX:
42154 case UNSPEC_VUPKLS_V4SF:
42155 case UNSPEC_VUPKLU_V4SF:
42156 case UNSPEC_VSX_CVDPSPN:
42157 case UNSPEC_VSX_CVSPDP:
42158 case UNSPEC_VSX_CVSPDPN:
42159 case UNSPEC_VSX_EXTRACT:
42160 case UNSPEC_VSX_VSLO:
42161 case UNSPEC_VSX_VEC_INIT:
42162 return 0;
42163 case UNSPEC_VSPLT_DIRECT:
42164 case UNSPEC_VSX_XXSPLTD:
42165 *special = SH_SPLAT;
42166 return 1;
42167 case UNSPEC_REDUC_PLUS:
42168 case UNSPEC_REDUC:
42169 return 1;
42173 default:
42174 break;
42177 const char *fmt = GET_RTX_FORMAT (code);
42178 int ok = 1;
42180 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42181 if (fmt[i] == 'e' || fmt[i] == 'u')
42183 unsigned int special_op = SH_NONE;
42184 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
42185 if (special_op == SH_NONE)
42186 continue;
42187 /* Ensure we never have two kinds of special handling
42188 for the same insn. */
42189 if (*special != SH_NONE && *special != special_op)
42190 return 0;
42191 *special = special_op;
42193 else if (fmt[i] == 'E')
42194 for (j = 0; j < XVECLEN (op, i); ++j)
42196 unsigned int special_op = SH_NONE;
42197 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
42198 if (special_op == SH_NONE)
42199 continue;
42200 /* Ensure we never have two kinds of special handling
42201 for the same insn. */
42202 if (*special != SH_NONE && *special != special_op)
42203 return 0;
42204 *special = special_op;
42207 return ok;
42210 /* Return 1 iff INSN is an operand that will not be affected by
42211 having vector doublewords swapped in memory (in which case
42212 *SPECIAL is unchanged), or that can be modified to be correct
42213 if vector doublewords are swapped in memory (in which case
42214 *SPECIAL is changed to a value indicating how). */
42215 static unsigned int
42216 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
42217 unsigned int *special)
42219 /* Calls are always bad. */
42220 if (GET_CODE (insn) == CALL_INSN)
42221 return 0;
42223 /* Loads and stores seen here are not permuting, but we can still
42224 fix them up by converting them to permuting ones. Exceptions:
42225 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
42226 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
42227 for the SET source. Also we must now make an exception for lvx
42228 and stvx when they are not in the UNSPEC_LVX/STVX form (with the
42229 explicit "& -16") since this leads to unrecognizable insns. */
42230 rtx body = PATTERN (insn);
42231 int i = INSN_UID (insn);
42233 if (insn_entry[i].is_load)
42235 if (GET_CODE (body) == SET)
42237 rtx rhs = SET_SRC (body);
42238 /* Even without a swap, the RHS might be a vec_select for, say,
42239 a byte-reversing load. */
42240 if (GET_CODE (rhs) != MEM)
42241 return 0;
42242 if (GET_CODE (XEXP (rhs, 0)) == AND)
42243 return 0;
42245 *special = SH_NOSWAP_LD;
42246 return 1;
42248 else
42249 return 0;
42252 if (insn_entry[i].is_store)
42254 if (GET_CODE (body) == SET
42255 && GET_CODE (SET_SRC (body)) != UNSPEC)
42257 rtx lhs = SET_DEST (body);
42258 /* Even without a swap, the LHS might be a vec_select for, say,
42259 a byte-reversing store. */
42260 if (GET_CODE (lhs) != MEM)
42261 return 0;
42262 if (GET_CODE (XEXP (lhs, 0)) == AND)
42263 return 0;
42265 *special = SH_NOSWAP_ST;
42266 return 1;
42268 else
42269 return 0;
42272 /* A convert to single precision can be left as is provided that
42273 all of its uses are in xxspltw instructions that splat BE element
42274 zero. */
42275 if (GET_CODE (body) == SET
42276 && GET_CODE (SET_SRC (body)) == UNSPEC
42277 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
42279 df_ref def;
42280 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42282 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42284 struct df_link *link = DF_REF_CHAIN (def);
42285 if (!link)
42286 return 0;
42288 for (; link; link = link->next) {
42289 rtx use_insn = DF_REF_INSN (link->ref);
42290 rtx use_body = PATTERN (use_insn);
42291 if (GET_CODE (use_body) != SET
42292 || GET_CODE (SET_SRC (use_body)) != UNSPEC
42293 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
42294 || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
42295 return 0;
42299 return 1;
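/* Illustrative sketch of the only accepted use (assuming the usual
   direct-xxspltw pattern shape, with the lane as operand 1 of the
   unspec):
     (set (reg:V4SF y)
          (unspec:V4SF [(reg:V4SF x)
                        (const_int 0)] UNSPEC_VSX_XXSPLTW))
   Any use that is not such a splat of element 0 causes the convert
   to be rejected.  */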
42302 /* A concatenation of two doublewords is ok if we reverse the
42303 order of the inputs. */
42304 if (GET_CODE (body) == SET
42305 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
42306 && (GET_MODE (SET_SRC (body)) == V2DFmode
42307 || GET_MODE (SET_SRC (body)) == V2DImode))
42309 *special = SH_CONCAT;
42310 return 1;
42313 /* V2DF reductions are always swappable. */
42314 if (GET_CODE (body) == PARALLEL)
42316 rtx expr = XVECEXP (body, 0, 0);
42317 if (GET_CODE (expr) == SET
42318 && v2df_reduction_p (SET_SRC (expr)))
42319 return 1;
42322 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
42323 constant pool. */
42324 if (GET_CODE (body) == SET
42325 && GET_CODE (SET_SRC (body)) == UNSPEC
42326 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
42327 && XVECLEN (SET_SRC (body), 0) == 3
42328 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
42330 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
42331 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42332 df_ref use;
42333 FOR_EACH_INSN_INFO_USE (use, insn_info)
42334 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
42336 struct df_link *def_link = DF_REF_CHAIN (use);
42337 /* Punt if multiple definitions for this reg. */
42338 if (def_link && !def_link->next
42339 && const_load_sequence_p (insn_entry,
42340 DF_REF_INSN (def_link->ref)))
42342 *special = SH_VPERM;
42343 return 1;
42348 /* Otherwise check the operands for vector lane violations. */
42349 return rtx_is_swappable_p (body, special);
42352 enum chain_purpose { FOR_LOADS, FOR_STORES };
42354 /* Return true if the UD or DU chain headed by LINK is non-empty,
42355 and every entry on the chain references an insn that is a
42356 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
42357 register swap must have only permuting loads as reaching defs.
42358 If PURPOSE is FOR_STORES, each such register swap must have only
42359 register swaps or permuting stores as reached uses. */
42360 static bool
42361 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
42362 enum chain_purpose purpose)
42364 if (!link)
42365 return false;
42367 for (; link; link = link->next)
42369 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
42370 continue;
42372 if (DF_REF_IS_ARTIFICIAL (link->ref))
42373 return false;
42375 rtx reached_insn = DF_REF_INSN (link->ref);
42376 unsigned uid = INSN_UID (reached_insn);
42377 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
42379 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
42380 || insn_entry[uid].is_store)
42381 return false;
42383 if (purpose == FOR_LOADS)
42385 df_ref use;
42386 FOR_EACH_INSN_INFO_USE (use, insn_info)
42388 struct df_link *swap_link = DF_REF_CHAIN (use);
42390 while (swap_link)
42392 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
42393 return false;
42395 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
42396 unsigned uid2 = INSN_UID (swap_def_insn);
42398 /* Only permuting loads are allowed. */
42399 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
42400 return false;
42402 swap_link = swap_link->next;
42406 else if (purpose == FOR_STORES)
42408 df_ref def;
42409 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42411 struct df_link *swap_link = DF_REF_CHAIN (def);
42413 while (swap_link)
42415 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
42416 return false;
42418 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
42419 unsigned uid2 = INSN_UID (swap_use_insn);
42421 /* Permuting stores or register swaps are allowed. */
42422 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
42423 return false;
42425 swap_link = swap_link->next;
42431 return true;
42434 /* Mark the xxswapdi instructions associated with permuting loads and
42435 stores for removal. Note that we only flag them for deletion here,
42436 as there is a possibility of a swap being reached from multiple
42437 loads, etc. */
42438 static void
42439 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
42441 rtx insn = insn_entry[i].insn;
42442 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42444 if (insn_entry[i].is_load)
42446 df_ref def;
42447 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42449 struct df_link *link = DF_REF_CHAIN (def);
42451 /* We know by now that these are swaps, so we can delete
42452 them confidently. */
42453 while (link)
42455 rtx use_insn = DF_REF_INSN (link->ref);
42456 insn_entry[INSN_UID (use_insn)].will_delete = 1;
42457 link = link->next;
42461 else if (insn_entry[i].is_store)
42463 df_ref use;
42464 FOR_EACH_INSN_INFO_USE (use, insn_info)
42466 /* Ignore uses for addressability. */
42467 machine_mode mode = GET_MODE (DF_REF_REG (use));
42468 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
42469 continue;
42471 struct df_link *link = DF_REF_CHAIN (use);
42473 /* We know by now that these are swaps, so we can delete
42474 them confidently. */
42475 while (link)
42477 rtx def_insn = DF_REF_INSN (link->ref);
42478 insn_entry[INSN_UID (def_insn)].will_delete = 1;
42479 link = link->next;
42485 /* OP is either a CONST_VECTOR or an expression containing one.
42486 If OP is a CONST_VECTOR, swap its first half with its second;
42487 otherwise recurse into OP's operands to find and process it.  */
42488 static void
42489 swap_const_vector_halves (rtx op)
42491 int i;
42492 enum rtx_code code = GET_CODE (op);
42493 if (GET_CODE (op) == CONST_VECTOR)
42495 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
42496 for (i = 0; i < half_units; ++i)
42498 rtx temp = CONST_VECTOR_ELT (op, i);
42499 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
42500 CONST_VECTOR_ELT (op, i + half_units) = temp;
42503 else
42505 int j;
42506 const char *fmt = GET_RTX_FORMAT (code);
42507 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42508 if (fmt[i] == 'e' || fmt[i] == 'u')
42509 swap_const_vector_halves (XEXP (op, i));
42510 else if (fmt[i] == 'E')
42511 for (j = 0; j < XVECLEN (op, i); ++j)
42512 swap_const_vector_halves (XVECEXP (op, i, j));
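/* For example, a V4SI constant { 0, 1, 2, 3 } has half_units == 2
   and is rewritten as { 2, 3, 0, 1 }, the image the constant takes
   on when its doublewords are swapped in a register.  */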
42516 /* Find all subregs of a vector expression that perform a narrowing,
42517 and adjust the subreg index to account for doubleword swapping. */
42518 static void
42519 adjust_subreg_index (rtx op)
42521 enum rtx_code code = GET_CODE (op);
42522 if (code == SUBREG
42523 && (GET_MODE_SIZE (GET_MODE (op))
42524 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
42526 unsigned int index = SUBREG_BYTE (op);
42527 if (index < 8)
42528 index += 8;
42529 else
42530 index -= 8;
42531 SUBREG_BYTE (op) = index;
42534 const char *fmt = GET_RTX_FORMAT (code);
43535 int i, j;
42536 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
42537 if (fmt[i] == 'e' || fmt[i] == 'u')
42538 adjust_subreg_index (XEXP (op, i));
42539 else if (fmt[i] == 'E')
42540 for (j = 0; j < XVECLEN (op, i); ++j)
42541 adjust_subreg_index (XVECEXP (op, i, j));
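/* Thus a narrowing subreg at byte offset 0 of a 16-byte vector is
   moved to offset 8, and one at offset 12 is moved to offset 4,
   since the two doublewords exchange places.  */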
42544 /* Convert the non-permuting load INSN to a permuting one. */
42545 static void
42546 permute_load (rtx_insn *insn)
42548 rtx body = PATTERN (insn);
42549 rtx mem_op = SET_SRC (body);
42550 rtx tgt_reg = SET_DEST (body);
42551 machine_mode mode = GET_MODE (tgt_reg);
42552 int n_elts = GET_MODE_NUNITS (mode);
42553 int half_elts = n_elts / 2;
42554 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
42555 int i, j;
42556 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
42557 XVECEXP (par, 0, i) = GEN_INT (j);
42558 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
42559 XVECEXP (par, 0, i) = GEN_INT (j);
42560 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
42561 SET_SRC (body) = sel;
42562 INSN_CODE (insn) = -1; /* Force re-recognition. */
42563 df_insn_rescan (insn);
42565 if (dump_file)
42566 fprintf (dump_file, "Replacing load %d with permuted load\n",
42567 INSN_UID (insn));
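/* For a V4SI load, the selector built above is (parallel [2 3 0 1]),
   so the new body is
     (set (reg:V4SI x) (vec_select:V4SI (mem:V4SI ...)
                                        (parallel [2 3 0 1])))
   which is the doubleword-permuting (lxvd2x-style) form.  */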
42570 /* Convert the non-permuting store INSN to a permuting one. */
42571 static void
42572 permute_store (rtx_insn *insn)
42574 rtx body = PATTERN (insn);
42575 rtx src_reg = SET_SRC (body);
42576 machine_mode mode = GET_MODE (src_reg);
42577 int n_elts = GET_MODE_NUNITS (mode);
42578 int half_elts = n_elts / 2;
42579 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
42580 int i, j;
42581 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
42582 XVECEXP (par, 0, i) = GEN_INT (j);
42583 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
42584 XVECEXP (par, 0, i) = GEN_INT (j);
42585 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
42586 SET_SRC (body) = sel;
42587 INSN_CODE (insn) = -1; /* Force re-recognition. */
42588 df_insn_rescan (insn);
42590 if (dump_file)
42591 fprintf (dump_file, "Replacing store %d with permuted store\n",
42592 INSN_UID (insn));
42595 /* Given INSN containing a vector extract operation, adjust the index
42596 of the extracted lane to account for the doubleword swap.  */
42597 static void
42598 adjust_extract (rtx_insn *insn)
42600 rtx pattern = PATTERN (insn);
42601 if (GET_CODE (pattern) == PARALLEL)
42602 pattern = XVECEXP (pattern, 0, 0);
42603 rtx src = SET_SRC (pattern);
42604 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
42605 account for that. */
42606 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
42607 rtx par = XEXP (sel, 1);
42608 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
42609 int lane = INTVAL (XVECEXP (par, 0, 0));
42610 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
42611 XVECEXP (par, 0, 0) = GEN_INT (lane);
42612 INSN_CODE (insn) = -1; /* Force re-recognition. */
42613 df_insn_rescan (insn);
42615 if (dump_file)
42616 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
42619 /* Given INSN containing a vector direct-splat operation, adjust the
42620 index of the source lane to account for the doubleword swap.  */
42621 static void
42622 adjust_splat (rtx_insn *insn)
42624 rtx body = PATTERN (insn);
42625 rtx unspec = XEXP (body, 1);
42626 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
42627 int lane = INTVAL (XVECEXP (unspec, 0, 1));
42628 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
42629 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
42630 INSN_CODE (insn) = -1; /* Force re-recognition. */
42631 df_insn_rescan (insn);
42633 if (dump_file)
42634 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
42637 /* Given INSN containing an XXPERMDI operation (that is not a doubleword
42638 swap), reverse the order of the source operands and adjust the indices
42639 of the source lanes to account for doubleword reversal.  */
42640 static void
42641 adjust_xxpermdi (rtx_insn *insn)
42643 rtx set = PATTERN (insn);
42644 rtx select = XEXP (set, 1);
42645 rtx concat = XEXP (select, 0);
42646 rtx src0 = XEXP (concat, 0);
42647 XEXP (concat, 0) = XEXP (concat, 1);
42648 XEXP (concat, 1) = src0;
42649 rtx parallel = XEXP (select, 1);
42650 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
42651 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
42652 int new_lane0 = 3 - lane1;
42653 int new_lane1 = 3 - lane0;
42654 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
42655 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
42656 INSN_CODE (insn) = -1; /* Force re-recognition. */
42657 df_insn_rescan (insn);
42659 if (dump_file)
42660 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
42663 /* Given INSN containing a VEC_CONCAT operation of two doublewords,
42664 reverse the order of those inputs.  */
42665 static void
42666 adjust_concat (rtx_insn *insn)
42668 rtx set = PATTERN (insn);
42669 rtx concat = XEXP (set, 1);
42670 rtx src0 = XEXP (concat, 0);
42671 XEXP (concat, 0) = XEXP (concat, 1);
42672 XEXP (concat, 1) = src0;
42673 INSN_CODE (insn) = -1; /* Force re-recognition. */
42674 df_insn_rescan (insn);
42676 if (dump_file)
42677 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
42680 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
42681 constant pool to reflect swapped doublewords. */
42682 static void
42683 adjust_vperm (rtx_insn *insn)
42685 /* We previously determined that the UNSPEC_VPERM was fed by a
42686 swap of a swapping load of a TOC-relative constant pool symbol.
42687 Find the MEM in the swapping load and replace it with a MEM for
42688 the adjusted mask constant. */
42689 rtx set = PATTERN (insn);
42690 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
42692 /* Find the swap. */
42693 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42694 df_ref use;
42695 rtx_insn *swap_insn = 0;
42696 FOR_EACH_INSN_INFO_USE (use, insn_info)
42697 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
42699 struct df_link *def_link = DF_REF_CHAIN (use);
42700 gcc_assert (def_link && !def_link->next);
42701 swap_insn = DF_REF_INSN (def_link->ref);
42702 break;
42704 gcc_assert (swap_insn);
42706 /* Find the load. */
42707 insn_info = DF_INSN_INFO_GET (swap_insn);
42708 rtx_insn *load_insn = 0;
42709 FOR_EACH_INSN_INFO_USE (use, insn_info)
42711 struct df_link *def_link = DF_REF_CHAIN (use);
42712 gcc_assert (def_link && !def_link->next);
42713 load_insn = DF_REF_INSN (def_link->ref);
42714 break;
42716 gcc_assert (load_insn);
42718 /* Find the TOC-relative symbol access. */
42719 insn_info = DF_INSN_INFO_GET (load_insn);
42720 rtx_insn *tocrel_insn = 0;
42721 FOR_EACH_INSN_INFO_USE (use, insn_info)
42723 struct df_link *def_link = DF_REF_CHAIN (use);
42724 gcc_assert (def_link && !def_link->next);
42725 tocrel_insn = DF_REF_INSN (def_link->ref);
42726 break;
42728 gcc_assert (tocrel_insn);
42730 /* Find the embedded CONST_VECTOR. We have to call toc_relative_expr_p
42731 to set tocrel_base; otherwise it would be unnecessary as we've
42732 already established it will return true. */
42733 rtx base, offset;
42734 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
42735 /* There is an extra level of indirection for small/large code models. */
42736 if (GET_CODE (tocrel_expr) == MEM)
42737 tocrel_expr = XEXP (tocrel_expr, 0);
42738 if (!toc_relative_expr_p (tocrel_expr, false))
42739 gcc_unreachable ();
42740 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
42741 rtx const_vector = get_pool_constant (base);
42742 /* With the extra indirection, get_pool_constant will produce the
42743 real constant from the reg_equal expression, so get the real
42744 constant. */
42745 if (GET_CODE (const_vector) == SYMBOL_REF)
42746 const_vector = get_pool_constant (const_vector);
42747 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
42749 /* Create an adjusted mask from the initial mask. */
42750 unsigned int new_mask[16], i, val;
42751 for (i = 0; i < 16; ++i) {
42752 val = INTVAL (XVECEXP (const_vector, 0, i));
42753 if (val < 16)
42754 new_mask[i] = (val + 8) % 16;
42755 else
42756 new_mask[i] = ((val + 8) % 16) + 16;
42759 /* Create a new CONST_VECTOR and a MEM that references it. */
42760 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
42761 for (i = 0; i < 16; ++i)
42762 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
42763 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
42764 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
42765 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
42766 can't recognize. Force the SYMBOL_REF into a register. */
42767 if (!REG_P (XEXP (new_mem, 0))) {
42768 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
42769 XEXP (new_mem, 0) = base_reg;
42770 /* Move the newly created insn ahead of the load insn. */
42771 rtx_insn *force_insn = get_last_insn ();
42772 remove_insn (force_insn);
42773 rtx_insn *before_load_insn = PREV_INSN (load_insn);
42774 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
42775 df_insn_rescan (before_load_insn);
42776 df_insn_rescan (force_insn);
42779 /* Replace the MEM in the load instruction and rescan it. */
42780 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
42781 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
42782 df_insn_rescan (load_insn);
42784 if (dump_file)
42785 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
42788 /* The insn described by INSN_ENTRY[I] can be swapped, but only
42789 with special handling. Take care of that here. */
42790 static void
42791 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
42793 rtx_insn *insn = insn_entry[i].insn;
42794 rtx body = PATTERN (insn);
42796 switch (insn_entry[i].special_handling)
42798 default:
42799 gcc_unreachable ();
42800 case SH_CONST_VECTOR:
42802 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
42803 gcc_assert (GET_CODE (body) == SET);
42804 rtx rhs = SET_SRC (body);
42805 swap_const_vector_halves (rhs);
42806 if (dump_file)
42807 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
42808 break;
42810 case SH_SUBREG:
42811 /* A subreg of the same size is already safe. For subregs that
42812 select a smaller portion of a reg, adjust the index for
42813 swapped doublewords. */
42814 adjust_subreg_index (body);
42815 if (dump_file)
42816 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
42817 break;
42818 case SH_NOSWAP_LD:
42819 /* Convert a non-permuting load to a permuting one. */
42820 permute_load (insn);
42821 break;
42822 case SH_NOSWAP_ST:
42823 /* Convert a non-permuting store to a permuting one. */
42824 permute_store (insn);
42825 break;
42826 case SH_EXTRACT:
42827 /* Change the lane on an extract operation. */
42828 adjust_extract (insn);
42829 break;
42830 case SH_SPLAT:
42831 /* Change the lane on a direct-splat operation. */
42832 adjust_splat (insn);
42833 break;
42834 case SH_XXPERMDI:
42835 /* Change the lanes on an XXPERMDI operation. */
42836 adjust_xxpermdi (insn);
42837 break;
42838 case SH_CONCAT:
42839 /* Reverse the order of a concatenation operation. */
42840 adjust_concat (insn);
42841 break;
42842 case SH_VPERM:
42843 /* Change the mask loaded from the constant pool for a VPERM. */
42844 adjust_vperm (insn);
42845 break;
42849 /* Find the insn from the Ith table entry, which is known to be a
42850 register swap Y = SWAP(X). Replace it with a copy Y = X. */
42851 static void
42852 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
42854 rtx_insn *insn = insn_entry[i].insn;
42855 rtx body = PATTERN (insn);
42856 rtx src_reg = XEXP (SET_SRC (body), 0);
42857 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
42858 rtx_insn *new_insn = emit_insn_before (copy, insn);
42859 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
42860 df_insn_rescan (new_insn);
42862 if (dump_file)
42864 unsigned int new_uid = INSN_UID (new_insn);
42865 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
42868 df_insn_delete (insn);
42869 remove_insn (insn);
42870 insn->set_deleted ();
42873 /* Dump the swap table to DUMP_FILE. */
42874 static void
42875 dump_swap_insn_table (swap_web_entry *insn_entry)
42877 int e = get_max_uid ();
42878 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
42880 for (int i = 0; i < e; ++i)
42881 if (insn_entry[i].is_relevant)
42883 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
42884 fprintf (dump_file, "%6d %6d ", i,
42885 pred_entry && pred_entry->insn
42886 ? INSN_UID (pred_entry->insn) : 0);
42887 if (insn_entry[i].is_load)
42888 fputs ("load ", dump_file);
42889 if (insn_entry[i].is_store)
42890 fputs ("store ", dump_file);
42891 if (insn_entry[i].is_swap)
42892 fputs ("swap ", dump_file);
42893 if (insn_entry[i].is_live_in)
42894 fputs ("live-in ", dump_file);
42895 if (insn_entry[i].is_live_out)
42896 fputs ("live-out ", dump_file);
42897 if (insn_entry[i].contains_subreg)
42898 fputs ("subreg ", dump_file);
42899 if (insn_entry[i].is_128_int)
42900 fputs ("int128 ", dump_file);
42901 if (insn_entry[i].is_call)
42902 fputs ("call ", dump_file);
42903 if (insn_entry[i].is_swappable)
42905 fputs ("swappable ", dump_file);
42906 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
42907 fputs ("special:constvec ", dump_file);
42908 else if (insn_entry[i].special_handling == SH_SUBREG)
42909 fputs ("special:subreg ", dump_file);
42910 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
42911 fputs ("special:load ", dump_file);
42912 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
42913 fputs ("special:store ", dump_file);
42914 else if (insn_entry[i].special_handling == SH_EXTRACT)
42915 fputs ("special:extract ", dump_file);
42916 else if (insn_entry[i].special_handling == SH_SPLAT)
42917 fputs ("special:splat ", dump_file);
42918 else if (insn_entry[i].special_handling == SH_XXPERMDI)
42919 fputs ("special:xxpermdi ", dump_file);
42920 else if (insn_entry[i].special_handling == SH_CONCAT)
42921 fputs ("special:concat ", dump_file);
42922 else if (insn_entry[i].special_handling == SH_VPERM)
42923 fputs ("special:vperm ", dump_file);
42925 if (insn_entry[i].web_not_optimizable)
42926 fputs ("unoptimizable ", dump_file);
42927 if (insn_entry[i].will_delete)
42928 fputs ("delete ", dump_file);
42929 fputs ("\n", dump_file);
42931 fputs ("\n", dump_file);
42934 /* Return ALIGN with its address canonicalized to (reg) or
42935 (plus reg reg). Here ALIGN is an (and addr (const_int -16)).
42936 Always return a new copy to avoid problems with combine.  */
42937 static rtx
42938 alignment_with_canonical_addr (rtx align)
42940 rtx canon;
42941 rtx addr = XEXP (align, 0);
42943 if (REG_P (addr))
42944 canon = addr;
42946 else if (GET_CODE (addr) == PLUS)
42948 rtx addrop0 = XEXP (addr, 0);
42949 rtx addrop1 = XEXP (addr, 1);
42951 if (!REG_P (addrop0))
42952 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
42954 if (!REG_P (addrop1))
42955 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
42957 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
42960 else
42961 canon = force_reg (GET_MODE (addr), addr);
42963 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
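/* For example, (and (plus (reg X) (const_int 16)) (const_int -16))
   is rebuilt as (and (plus (reg X) (reg Y)) (const_int -16)), with
   (reg Y) a new pseudo loaded with 16.  */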
42966 /* Check whether an rtx is an alignment mask, and if so, return
42967 a fully-expanded rtx for the masking operation. */
42968 static rtx
42969 alignment_mask (rtx_insn *insn)
42971 rtx body = PATTERN (insn);
42973 if (GET_CODE (body) != SET
42974 || GET_CODE (SET_SRC (body)) != AND
42975 || !REG_P (XEXP (SET_SRC (body), 0)))
42976 return 0;
42978 rtx mask = XEXP (SET_SRC (body), 1);
42980 if (GET_CODE (mask) == CONST_INT)
42982 if (INTVAL (mask) == -16)
42983 return alignment_with_canonical_addr (SET_SRC (body));
42984 else
42985 return 0;
42988 if (!REG_P (mask))
42989 return 0;
42991 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42992 df_ref use;
42993 rtx real_mask = 0;
42995 FOR_EACH_INSN_INFO_USE (use, insn_info)
42997 if (!rtx_equal_p (DF_REF_REG (use), mask))
42998 continue;
43000 struct df_link *def_link = DF_REF_CHAIN (use);
43001 if (!def_link || def_link->next)
43002 return 0;
43004 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
43005 rtx const_body = PATTERN (const_insn);
43006 if (GET_CODE (const_body) != SET)
43007 return 0;
43009 real_mask = SET_SRC (const_body);
43011 if (GET_CODE (real_mask) != CONST_INT
43012 || INTVAL (real_mask) != -16)
43013 return 0;
43016 if (real_mask == 0)
43017 return 0;
43019 return alignment_with_canonical_addr (SET_SRC (body));
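/* So both (and (reg A) (const_int -16)) and (and (reg A) (reg M)),
   where M's single reaching def sets it to -16, are recognized as
   alignment masks; anything else returns 0.  */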
43022 /* Given INSN that's a load or store based at BASE_REG, look for a
43023 feeding computation that aligns its address on a 16-byte boundary. */
43024 static rtx
43025 find_alignment_op (rtx_insn *insn, rtx base_reg)
43027 df_ref base_use;
43028 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43029 rtx and_operation = 0;
43031 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
43033 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
43034 continue;
43036 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
43037 if (!base_def_link || base_def_link->next)
43038 break;
43040 /* With stack-protector code enabled, and possibly in other
43041 circumstances, there may not be an associated insn for
43042 the def. */
43043 if (DF_REF_IS_ARTIFICIAL (base_def_link->ref))
43044 break;
43046 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
43047 and_operation = alignment_mask (and_insn);
43048 if (and_operation != 0)
43049 break;
43052 return and_operation;
43055 struct del_info { bool replace; rtx_insn *replace_insn; };
43057 /* If INSN is the load for an lvx pattern, put it in canonical form. */
43058 static void
43059 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
43061 rtx body = PATTERN (insn);
43062 gcc_assert (GET_CODE (body) == SET
43063 && GET_CODE (SET_SRC (body)) == VEC_SELECT
43064 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
43066 rtx mem = XEXP (SET_SRC (body), 0);
43067 rtx base_reg = XEXP (mem, 0);
43069 rtx and_operation = find_alignment_op (insn, base_reg);
43071 if (and_operation != 0)
43073 df_ref def;
43074 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43075 FOR_EACH_INSN_INFO_DEF (def, insn_info)
43077 struct df_link *link = DF_REF_CHAIN (def);
43078 if (!link || link->next)
43079 break;
43081 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
43082 if (!insn_is_swap_p (swap_insn)
43083 || insn_is_load_p (swap_insn)
43084 || insn_is_store_p (swap_insn))
43085 break;
43087 /* Expected lvx pattern found. Change the swap to
43088 a copy, and propagate the AND operation into the
43089 load. */
43090 to_delete[INSN_UID (swap_insn)].replace = true;
43091 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
43093 XEXP (mem, 0) = and_operation;
43094 SET_SRC (body) = mem;
43095 INSN_CODE (insn) = -1; /* Force re-recognition. */
43096 df_insn_rescan (insn);
43098 if (dump_file)
43099 fprintf (dump_file, "lvx opportunity found at %d\n",
43100 INSN_UID (insn));
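/* Net effect: the (vec_select (mem ...)) swapping load becomes a
   plain (mem (and addr -16)) load, and the register swap it feeds is
   later replaced by a copy, together matching the canonical lvx
   form.  */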
43105 /* If INSN is the store for an stvx pattern, put it in canonical form. */
43106 static void
43107 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
43109 rtx body = PATTERN (insn);
43110 gcc_assert (GET_CODE (body) == SET
43111 && GET_CODE (SET_DEST (body)) == MEM
43112 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
43113 rtx mem = SET_DEST (body);
43114 rtx base_reg = XEXP (mem, 0);
43116 rtx and_operation = find_alignment_op (insn, base_reg);
43118 if (and_operation != 0)
43120 rtx src_reg = XEXP (SET_SRC (body), 0);
43121 df_ref src_use;
43122 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43123 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
43125 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
43126 continue;
43128 struct df_link *link = DF_REF_CHAIN (src_use);
43129 if (!link || link->next)
43130 break;
43132 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
43133 if (!insn_is_swap_p (swap_insn)
43134 || insn_is_load_p (swap_insn)
43135 || insn_is_store_p (swap_insn))
43136 break;
43138 /* Expected stvx pattern found. Change the swap to
43139 a copy, and propagate the AND operation into the
43140 store. */
43141 to_delete[INSN_UID (swap_insn)].replace = true;
43142 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
43144 XEXP (mem, 0) = and_operation;
43145 SET_SRC (body) = src_reg;
43146 INSN_CODE (insn) = -1; /* Force re-recognition. */
43147 df_insn_rescan (insn);
43149 if (dump_file)
43150 fprintf (dump_file, "stvx opportunity found at %d\n",
43151 INSN_UID (insn));
43156 /* Look for patterns created from builtin lvx and stvx calls, and
43157 canonicalize them to be properly recognized as such. */
43158 static void
43159 recombine_lvx_stvx_patterns (function *fun)
43161 int i;
43162 basic_block bb;
43163 rtx_insn *insn;
43165 int num_insns = get_max_uid ();
43166 del_info *to_delete = XCNEWVEC (del_info, num_insns);
43168 FOR_ALL_BB_FN (bb, fun)
43169 FOR_BB_INSNS (bb, insn)
43171 if (!NONDEBUG_INSN_P (insn))
43172 continue;
43174 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
43175 recombine_lvx_pattern (insn, to_delete);
43176 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
43177 recombine_stvx_pattern (insn, to_delete);
43180 /* Turning swaps into copies is delayed until now, to avoid problems
43181 with deleting instructions during the insn walk. */
43182 for (i = 0; i < num_insns; i++)
43183 if (to_delete[i].replace)
43185 rtx swap_body = PATTERN (to_delete[i].replace_insn);
43186 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
43187 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
43188 rtx_insn *new_insn = emit_insn_before (copy,
43189 to_delete[i].replace_insn);
43190 set_block_for_insn (new_insn,
43191 BLOCK_FOR_INSN (to_delete[i].replace_insn));
43192 df_insn_rescan (new_insn);
43193 df_insn_delete (to_delete[i].replace_insn);
43194 remove_insn (to_delete[i].replace_insn);
43195 to_delete[i].replace_insn->set_deleted ();
43198 free (to_delete);
43201 /* Main entry point for this pass. */
43202 unsigned int
43203 rs6000_analyze_swaps (function *fun)
43205 swap_web_entry *insn_entry;
43206 basic_block bb;
43207 rtx_insn *insn, *curr_insn = 0;
43209 /* Dataflow analysis for use-def chains. */
43210 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
43211 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
43212 df_analyze ();
43213 df_set_flags (DF_DEFER_INSN_RESCAN);
43215 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
43216 recombine_lvx_stvx_patterns (fun);
43218 /* Allocate structure to represent webs of insns. */
43219 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
43221 /* Walk the insns to gather basic data. */
43222 FOR_ALL_BB_FN (bb, fun)
43223 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
43225 unsigned int uid = INSN_UID (insn);
43226 if (NONDEBUG_INSN_P (insn))
43228 insn_entry[uid].insn = insn;
43230 if (GET_CODE (insn) == CALL_INSN)
43231 insn_entry[uid].is_call = 1;
43233 /* Walk the uses and defs to see if we mention vector regs.
43234 Record any constraints on optimization of such mentions. */
43235 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43236 df_ref mention;
43237 FOR_EACH_INSN_INFO_USE (mention, insn_info)
43239 /* We use DF_REF_REAL_REG here to get inside any subregs. */
43240 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
43242 /* If a use gets its value from a call insn, it will be
43243 a hard register and will look like (reg:V4SI 3 3).
43244 The df analysis creates two mentions for GPR3 and GPR4,
43245 both DImode. We must recognize this and treat it as a
43246 vector mention to ensure the call is unioned with this
43247 use. */
43248 if (mode == DImode && DF_REF_INSN_INFO (mention))
43250 rtx feeder = DF_REF_INSN (mention);
43251 /* FIXME: It is pretty hard to get from the df mention
43252 to the mode of the use in the insn. We arbitrarily
43253 pick a vector mode here, even though the use might
43254 be a real DImode. We can be too conservative
43255 (create a web larger than necessary) because of
43256 this, so consider eventually fixing this. */
43257 if (GET_CODE (feeder) == CALL_INSN)
43258 mode = V4SImode;
43261 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
43263 insn_entry[uid].is_relevant = 1;
43264 if (mode == TImode || mode == V1TImode
43265 || FLOAT128_VECTOR_P (mode))
43266 insn_entry[uid].is_128_int = 1;
43267 if (DF_REF_INSN_INFO (mention))
43268 insn_entry[uid].contains_subreg
43269 = !rtx_equal_p (DF_REF_REG (mention),
43270 DF_REF_REAL_REG (mention));
43271 union_defs (insn_entry, insn, mention);
43274 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
43276 /* We use DF_REF_REAL_REG here to get inside any subregs. */
43277 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
43279 /* If we're loading up a hard vector register for a call,
43280 it looks like (set (reg:V4SI 9 9) (...)). The df
43281 analysis creates two mentions for GPR9 and GPR10, both
43282 DImode. So relying on the mode from the mentions
43283 isn't sufficient to ensure we union the call into the
43284 web with the parameter setup code. */
43285 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
43286 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
43287 mode = GET_MODE (SET_DEST (PATTERN (insn)));
43289 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
43291 insn_entry[uid].is_relevant = 1;
43292 if (mode == TImode || mode == V1TImode
43293 || FLOAT128_VECTOR_P (mode))
43294 insn_entry[uid].is_128_int = 1;
43295 if (DF_REF_INSN_INFO (mention))
43296 insn_entry[uid].contains_subreg
43297 = !rtx_equal_p (DF_REF_REG (mention),
43298 DF_REF_REAL_REG (mention));
43299 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
43300 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
43301 insn_entry[uid].is_live_out = 1;
43302 union_uses (insn_entry, insn, mention);
43306 if (insn_entry[uid].is_relevant)
43308 /* Determine if this is a load or store. */
43309 insn_entry[uid].is_load = insn_is_load_p (insn);
43310 insn_entry[uid].is_store = insn_is_store_p (insn);
43312 /* Determine if this is a doubleword swap. If not,
43313 determine whether it can legally be swapped. */
43314 if (insn_is_swap_p (insn))
43315 insn_entry[uid].is_swap = 1;
43316 else
43318 unsigned int special = SH_NONE;
43319 insn_entry[uid].is_swappable
43320 = insn_is_swappable_p (insn_entry, insn, &special);
43321 if (special != SH_NONE && insn_entry[uid].contains_subreg)
43322 insn_entry[uid].is_swappable = 0;
43323 else if (special != SH_NONE)
43324 insn_entry[uid].special_handling = special;
43325 else if (insn_entry[uid].contains_subreg)
43326 insn_entry[uid].special_handling = SH_SUBREG;
43332 if (dump_file)
43334 fprintf (dump_file, "\nSwap insn entry table when first built\n");
43335 dump_swap_insn_table (insn_entry);
43338 /* Record unoptimizable webs. */
43339 unsigned e = get_max_uid (), i;
43340 for (i = 0; i < e; ++i)
43342 if (!insn_entry[i].is_relevant)
43343 continue;
43345 swap_web_entry *root
43346 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
43348 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
43349 || (insn_entry[i].contains_subreg
43350 && insn_entry[i].special_handling != SH_SUBREG)
43351 || insn_entry[i].is_128_int || insn_entry[i].is_call
43352 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
43353 root->web_not_optimizable = 1;
43355 /* If we have loads or stores that aren't permuting then the
43356 optimization isn't appropriate. */
43357 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
43358 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
43359 root->web_not_optimizable = 1;
43361 /* If we have permuting loads or stores that are not accompanied
43362 by a register swap, the optimization isn't appropriate. */
43363 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
43365 rtx insn = insn_entry[i].insn;
43366 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43367 df_ref def;
43369 FOR_EACH_INSN_INFO_DEF (def, insn_info)
43371 struct df_link *link = DF_REF_CHAIN (def);
43373 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
43375 root->web_not_optimizable = 1;
43376 break;
43380 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
43382 rtx insn = insn_entry[i].insn;
43383 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
43384 df_ref use;
43386 FOR_EACH_INSN_INFO_USE (use, insn_info)
43388 struct df_link *link = DF_REF_CHAIN (use);
43390 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
43392 root->web_not_optimizable = 1;
43393 break;
43399 if (dump_file)
43401 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
43402 dump_swap_insn_table (insn_entry);
43405 /* For each load and store in an optimizable web (which implies
43406 the loads and stores are permuting), find the associated
43407 register swaps and mark them for removal. Due to various
43408 optimizations we may mark the same swap more than once. Also
43409 perform special handling for swappable insns that require it. */
43410 for (i = 0; i < e; ++i)
43411 if ((insn_entry[i].is_load || insn_entry[i].is_store)
43412 && insn_entry[i].is_swap)
43414 swap_web_entry* root_entry
43415 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
43416 if (!root_entry->web_not_optimizable)
43417 mark_swaps_for_removal (insn_entry, i);
43419 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
43421 swap_web_entry* root_entry
43422 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
43423 if (!root_entry->web_not_optimizable)
43424 handle_special_swappables (insn_entry, i);
43427 /* Now delete the swaps marked for removal. */
43428 for (i = 0; i < e; ++i)
43429 if (insn_entry[i].will_delete)
43430 replace_swap_with_copy (insn_entry, i);
43432 /* Clean up. */
43433 free (insn_entry);
43434 return 0;
43437 const pass_data pass_data_analyze_swaps =
43439 RTL_PASS, /* type */
43440 "swaps", /* name */
43441 OPTGROUP_NONE, /* optinfo_flags */
43442 TV_NONE, /* tv_id */
43443 0, /* properties_required */
43444 0, /* properties_provided */
43445 0, /* properties_destroyed */
43446 0, /* todo_flags_start */
43447 TODO_df_finish, /* todo_flags_finish */
43450 class pass_analyze_swaps : public rtl_opt_pass
43452 public:
43453 pass_analyze_swaps (gcc::context *ctxt)
43454 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
43457 /* opt_pass methods: */
43458 virtual bool gate (function *)
43460 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
43461 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
43464 virtual unsigned int execute (function *fun)
43466 return rs6000_analyze_swaps (fun);
43469 opt_pass *clone ()
43471 return new pass_analyze_swaps (m_ctxt);
43474 }; // class pass_analyze_swaps
43476 rtl_opt_pass *
43477 make_pass_analyze_swaps (gcc::context *ctxt)
43479 return new pass_analyze_swaps (ctxt);
43482 #ifdef RS6000_GLIBC_ATOMIC_FENV
43483 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
43484 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
43485 #endif
43487 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
43489 static void
43490 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
43492 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
43494 #ifdef RS6000_GLIBC_ATOMIC_FENV
43495 if (atomic_hold_decl == NULL_TREE)
43497 atomic_hold_decl
43498 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43499 get_identifier ("__atomic_feholdexcept"),
43500 build_function_type_list (void_type_node,
43501 double_ptr_type_node,
43502 NULL_TREE));
43503 TREE_PUBLIC (atomic_hold_decl) = 1;
43504 DECL_EXTERNAL (atomic_hold_decl) = 1;
43507 if (atomic_clear_decl == NULL_TREE)
43509 atomic_clear_decl
43510 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43511 get_identifier ("__atomic_feclearexcept"),
43512 build_function_type_list (void_type_node,
43513 NULL_TREE));
43514 TREE_PUBLIC (atomic_clear_decl) = 1;
43515 DECL_EXTERNAL (atomic_clear_decl) = 1;
43518 tree const_double = build_qualified_type (double_type_node,
43519 TYPE_QUAL_CONST);
43520 tree const_double_ptr = build_pointer_type (const_double);
43521 if (atomic_update_decl == NULL_TREE)
43523 atomic_update_decl
43524 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
43525 get_identifier ("__atomic_feupdateenv"),
43526 build_function_type_list (void_type_node,
43527 const_double_ptr,
43528 NULL_TREE));
43529 TREE_PUBLIC (atomic_update_decl) = 1;
43530 DECL_EXTERNAL (atomic_update_decl) = 1;
43533 tree fenv_var = create_tmp_var_raw (double_type_node);
43534 TREE_ADDRESSABLE (fenv_var) = 1;
43535 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
43537 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
43538 *clear = build_call_expr (atomic_clear_decl, 0);
43539 *update = build_call_expr (atomic_update_decl, 1,
43540 fold_convert (const_double_ptr, fenv_addr));
43541 #endif
43542 return;
43545 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
43546 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
43547 tree call_mffs = build_call_expr (mffs, 0);
43549 /* Generates the equivalent of feholdexcept (&fenv_var):
43551 *fenv_var = __builtin_mffs ();
43552 double fenv_hold;
43553 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
43554 __builtin_mtfsf (0xff, fenv_hold); */
43556 /* Mask to clear everything except for the rounding modes and non-IEEE
43557 arithmetic flag. */
43558 const unsigned HOST_WIDE_INT hold_exception_mask =
43559 HOST_WIDE_INT_C (0xffffffff00000007);
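/* In the FPSCR image returned by mffs, the two low-order bits are
   the rounding mode (RN) and bit 2 is the non-IEEE mode bit (NI),
   so 0x7 preserves exactly those fields.  */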
43561 tree fenv_var = create_tmp_var_raw (double_type_node);
43563 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
43565 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
43566 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
43567 build_int_cst (uint64_type_node,
43568 hold_exception_mask));
43570 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43571 fenv_llu_and);
43573 tree hold_mtfsf = build_call_expr (mtfsf, 2,
43574 build_int_cst (unsigned_type_node, 0xff),
43575 fenv_hold_mtfsf);
43577 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
43579 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
43581 double fenv_clear = __builtin_mffs ();
43582 *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
43583 __builtin_mtfsf (0xff, fenv_clear); */
43585 /* Mask to clear the entire active (lower) word of the FPSCR,
43586 including the exception flags, enables, and rounding modes. */
43587 const unsigned HOST_WIDE_INT clear_exception_mask =
43588 HOST_WIDE_INT_C (0xffffffff00000000);
43590 tree fenv_clear = create_tmp_var_raw (double_type_node);
43592 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
43594 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
43595 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
43596 fenv_clean_llu,
43597 build_int_cst (uint64_type_node,
43598 clear_exception_mask));
43600 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43601 fenv_clear_llu_and);
43603 tree clear_mtfsf = build_call_expr (mtfsf, 2,
43604 build_int_cst (unsigned_type_node, 0xff),
43605 fenv_clear_mtfsf);
43607 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
43609 /* Generates the equivalent of feupdateenv (&fenv_var):
43611 double old_fenv = __builtin_mffs ();
43612 double fenv_update;
43613 *(uint64_t*)&fenv_update = (*(uint64_t*)&old & 0xffffffff1fffff00LL) |
43614 (*(uint64_t*)fenv_var 0x1ff80fff);
43615 __builtin_mtfsf (0xff, fenv_update); */
43617 const unsigned HOST_WIDE_INT update_exception_mask =
43618 HOST_WIDE_INT_C (0xffffffff1fffff00);
43619 const unsigned HOST_WIDE_INT new_exception_mask =
43620 HOST_WIDE_INT_C (0x1ff80fff);
43622 tree old_fenv = create_tmp_var_raw (double_type_node);
43623 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
43625 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
43626 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
43627 build_int_cst (uint64_type_node,
43628 update_exception_mask));
43630 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
43631 build_int_cst (uint64_type_node,
43632 new_exception_mask));
43634 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
43635 old_llu_and, new_llu_and);
43637 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
43638 new_llu_mask);
43640 tree update_mtfsf = build_call_expr (mtfsf, 2,
43641 build_int_cst (unsigned_type_node, 0xff),
43642 fenv_update_mtfsf);
43644 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
43647 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
43649 static bool
43650 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
43651 optimization_type opt_type)
43653 switch (op)
43655 case rsqrt_optab:
43656 return (opt_type == OPTIMIZE_FOR_SPEED
43657 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
43659 default:
43660 return true;
43664 struct gcc_target targetm = TARGET_INITIALIZER;
43666 #include "gt-powerpcspe.h"