/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "rs6000-internal.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif
/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;
/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;

extern GTY(()) section *toc_section;
section *toc_section = 0;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV     = 0x001,  /* Use divide estimate.  */
  RECIP_DF_DIV     = 0x002,
  RECIP_V4SF_DIV   = 0x004,
  RECIP_V2DF_DIV   = 0x008,

  RECIP_SF_RSQRT   = 0x010,  /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT   = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE = 0,
  RECIP_ALL  = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		| RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
		| RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;  /* option name.  */
  unsigned int mask;   /* mask bits to set.  */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	       | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask.  */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
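/* Illustrative sketch (not built): because the table above is ordered
   from the default to the highest ISA, picking the best clone for a set
   of ISA flags is a simple scan for the last entry whose mask bits are
   all present.  The helper name is hypothetical.  */
#if 0
static int
example_best_clone (HOST_WIDE_INT isa_flags)
{
  int best = CLONE_DEFAULT;
  for (int i = CLONE_DEFAULT + 1; i < CLONE_MAX; i++)
    if ((isa_flags & rs6000_clone_map[i].isa_mask)
	== rs6000_clone_map[i].isa_mask)
      best = i;
  return best;
}
#endif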
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
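/* Illustrative sketch (not built): an addr_mask for a mode that is valid
   in a register class and supports reg+reg and reg+offset addressing,
   where the offset must additionally be a multiple of 16 (DQ-form), would
   be composed like this.  The variable name is hypothetical.  */
#if 0
static const addr_mask_type example_dq_addr_mask
  = (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
     | RELOAD_REG_QUAD_OFFSET);
#endif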
/* Masks of the valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
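/* For illustration (a hand-written sketch, not taken from any machine
   description): a PARALLEL such as

     (parallel [(set (mem:DI (reg:DI 3)) (reg:DI 4))
		(clobber (reg:DI 5))])

   passes the checks above, while a PARALLEL containing an element that
   is neither a SET, USE, nor CLOBBER makes us return false instead of
   letting the generic store_data_bypass_p assert on it.  */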
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;
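/* COSTS_N_INSNS (defined in rtl.h) scales an instruction count into the
   cost units used in the tables below, with COSTS_N_INSNS (1) being one
   add; e.g. power9_cost's divsi entry of COSTS_N_INSNS (8) models an
   SImode divide as costing the same as eight adds.  */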
/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
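/* Sketch of the X-macro expansion above: a hypothetical line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   expands to the initializer { "foo", CODE_FOR_foo, MASK, ATTR },
   yielding one rs6000_builtin_info_type entry per builtin when the .def
   file is included into rs6000_builtin_info.  */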
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static tree get_prev_label (tree);
#endif
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static rtx rs6000_internal_arg_pointer (void);

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "0", "1", "2", "3", "4", "5", "6", "7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  /* GPRs */
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
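/* Usage sketch (hypothetical user code, not part of this file): the
   "altivec" attribute above is what altivec.h maps the vector keyword
   onto, roughly

     #define __vector __attribute__ ((altivec (vector__)))

   while "longcall"/"shortcall" apply to function types, e.g.

     int far_func (void) __attribute__ ((longcall));  */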
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
  rs6000_ira_change_pseudo_allocno_class
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_TRANSLATE_MODE_ATTRIBUTE
#define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1727 #undef TARGET_OPTION_OVERRIDE
1728 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1730 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1731 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1732 rs6000_builtin_vectorized_function
1734 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1735 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1736 rs6000_builtin_md_vectorized_function
1738 #undef TARGET_STACK_PROTECT_GUARD
1739 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1741 #if !TARGET_MACHO
1742 #undef TARGET_STACK_PROTECT_FAIL
1743 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1744 #endif
1746 #ifdef HAVE_AS_TLS
1747 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1748 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1749 #endif
1751 /* Use a 32-bit anchor range. This leads to sequences like:
1753 addis tmp,anchor,high
1754 add dest,tmp,low
1756 where tmp itself acts as an anchor, and can be shared between
1757 accesses to the same 64k page. */
1758 #undef TARGET_MIN_ANCHOR_OFFSET
1759 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1760 #undef TARGET_MAX_ANCHOR_OFFSET
1761 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1762 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1763 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1764 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1765 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1767 #undef TARGET_BUILTIN_RECIPROCAL
1768 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1770 #undef TARGET_SECONDARY_RELOAD
1771 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1772 #undef TARGET_SECONDARY_MEMORY_NEEDED
1773 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1774 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1775 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1777 #undef TARGET_LEGITIMATE_ADDRESS_P
1778 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1780 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1781 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1783 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1784 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1786 #undef TARGET_CAN_ELIMINATE
1787 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1789 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1790 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1792 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1793 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1795 #undef TARGET_TRAMPOLINE_INIT
1796 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1798 #undef TARGET_FUNCTION_VALUE
1799 #define TARGET_FUNCTION_VALUE rs6000_function_value
1801 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1802 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1804 #undef TARGET_OPTION_SAVE
1805 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1807 #undef TARGET_OPTION_RESTORE
1808 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1810 #undef TARGET_OPTION_PRINT
1811 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1813 #undef TARGET_CAN_INLINE_P
1814 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1816 #undef TARGET_SET_CURRENT_FUNCTION
1817 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1819 #undef TARGET_LEGITIMATE_CONSTANT_P
1820 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1822 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1823 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1825 #undef TARGET_CAN_USE_DOLOOP_P
1826 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1828 #undef TARGET_PREDICT_DOLOOP_P
1829 #define TARGET_PREDICT_DOLOOP_P rs6000_predict_doloop_p
1831 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1832 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1834 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1835 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1836 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1837 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1838 #undef TARGET_UNWIND_WORD_MODE
1839 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1841 #undef TARGET_OFFLOAD_OPTIONS
1842 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1844 #undef TARGET_C_MODE_FOR_SUFFIX
1845 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1847 #undef TARGET_INVALID_BINARY_OP
1848 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1850 #undef TARGET_OPTAB_SUPPORTED_P
1851 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1853 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1854 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1856 #undef TARGET_COMPARE_VERSION_PRIORITY
1857 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1859 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1860 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1861 rs6000_generate_version_dispatcher_body
1863 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1864 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1865 rs6000_get_function_versions_dispatcher
1867 #undef TARGET_OPTION_FUNCTION_VERSIONS
1868 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1870 #undef TARGET_HARD_REGNO_NREGS
1871 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1872 #undef TARGET_HARD_REGNO_MODE_OK
1873 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1875 #undef TARGET_MODES_TIEABLE_P
1876 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1878 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1879 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1880 rs6000_hard_regno_call_part_clobbered
1882 #undef TARGET_SLOW_UNALIGNED_ACCESS
1883 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1885 #undef TARGET_CAN_CHANGE_MODE_CLASS
1886 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1888 #undef TARGET_CONSTANT_ALIGNMENT
1889 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1891 #undef TARGET_STARTING_FRAME_OFFSET
1892 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1894 #if TARGET_ELF && RS6000_WEAK
1895 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1896 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1897 #endif
1899 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1900 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1902 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1903 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
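/* Note (illustrative, not part of the original source): each #undef /
   #define pair above overrides one field of the target hook vector;
   GCC's target-def.h turns these macros into the initializer used as

     struct gcc_target targetm = TARGET_INITIALIZER;

   so that, e.g., a middle-end call to targetm.sched.init dispatches to
   rs6000_sched_init.  */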
1906 /* Processor table. */
1907 struct rs6000_ptt
1909 const char *const name; /* Canonical processor name. */
1910 const enum processor_type processor; /* Processor type enum value. */
1911 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1914 static struct rs6000_ptt const processor_target_table[] =
1916 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1917 #include "rs6000-cpus.def"
1918 #undef RS6000_CPU
1921 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1922 name is invalid. */
1924 static int
1925 rs6000_cpu_name_lookup (const char *name)
1927 size_t i;
1929 if (name != NULL)
1931 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1932 if (! strcmp (name, processor_target_table[i].name))
1933 return (int)i;
1936 return -1;
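/* Usage sketch (illustrative, not part of the original source):
   callers map a -mcpu=/-mtune= string to a table index and then apply
   the associated flags, e.g.:

     int idx = rs6000_cpu_name_lookup ("power9");
     if (idx >= 0)
       {
         HOST_WIDE_INT flags = processor_target_table[idx].target_enable;
         ...
       }
*/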
1940 /* Return number of consecutive hard regs needed starting at reg REGNO
1941 to hold something of mode MODE.
1942 This is ordinarily the length in words of a value of mode MODE
1943 but can be less for certain modes in special long registers.
1945 POWER and PowerPC GPRs hold 32 bits worth;
1946 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1948 static int
1949 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1951 unsigned HOST_WIDE_INT reg_size;
1953 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1954 128-bit floating point that can go in vector registers, which has VSX
1955 memory addressing. */
1956 if (FP_REGNO_P (regno))
1957 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1958 ? UNITS_PER_VSX_WORD
1959 : UNITS_PER_FP_WORD);
1961 else if (ALTIVEC_REGNO_P (regno))
1962 reg_size = UNITS_PER_ALTIVEC_WORD;
1964 else
1965 reg_size = UNITS_PER_WORD;
1967 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
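/* Worked example (illustrative, not part of the original source): a
   16-byte vector mode held in 4-byte GPRs needs (16 + 4 - 1) / 4 == 4
   consecutive registers, while the same mode held in a 16-byte VSX
   register needs (16 + 16 - 1) / 16 == 1.  */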
1970 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1971 MODE. */
1972 static int
1973 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
1975 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1977 if (COMPLEX_MODE_P (mode))
1978 mode = GET_MODE_INNER (mode);
1980 /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
1981 register combinations, and PTImode is used where we need to deal with quad
1982 word memory operations.  Don't allow quad words in the argument or frame
1983 pointer registers, just registers 0..31. */
1984 if (mode == PTImode)
1985 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1986 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1987 && ((regno & 1) == 0));
1989 /* VSX registers that overlap the FPR registers are larger than on non-VSX
1990 implementations. Don't allow an item to be split between a FP register
1991 and an Altivec register. Allow TImode in all VSX registers if the user
1992 asked for it. */
1993 if (TARGET_VSX && VSX_REGNO_P (regno)
1994 && (VECTOR_MEM_VSX_P (mode)
1995 || FLOAT128_VECTOR_P (mode)
1996 || reg_addr[mode].scalar_in_vmx_p
1997 || mode == TImode
1998 || (TARGET_VADDUQM && mode == V1TImode)))
2000 if (FP_REGNO_P (regno))
2001 return FP_REGNO_P (last_regno);
2003 if (ALTIVEC_REGNO_P (regno))
2005 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2006 return 0;
2008 return ALTIVEC_REGNO_P (last_regno);
2012 /* The GPRs can hold any mode, but values bigger than one register
2013 cannot go past R31. */
2014 if (INT_REGNO_P (regno))
2015 return INT_REGNO_P (last_regno);
2017 /* The float registers (except for VSX vector modes) can only hold floating
2018 modes and DImode. */
2019 if (FP_REGNO_P (regno))
2021 if (FLOAT128_VECTOR_P (mode))
2022 return false;
2024 if (SCALAR_FLOAT_MODE_P (mode)
2025 && (mode != TDmode || (regno % 2) == 0)
2026 && FP_REGNO_P (last_regno))
2027 return 1;
2029 if (GET_MODE_CLASS (mode) == MODE_INT)
2031 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2032 return 1;
2034 if (TARGET_P8_VECTOR && (mode == SImode))
2035 return 1;
2037 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2038 return 1;
2041 return 0;
2044 /* The CR register can only hold CC modes. */
2045 if (CR_REGNO_P (regno))
2046 return GET_MODE_CLASS (mode) == MODE_CC;
2048 if (CA_REGNO_P (regno))
2049 return mode == Pmode || mode == SImode;
2051 /* AltiVec vectors can only go in AltiVec registers. */
2052 if (ALTIVEC_REGNO_P (regno))
2053 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2054 || mode == V1TImode);
2056 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2057 registers, and the value must be able to fit within the register set. */
2059 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2062 /* Implement TARGET_HARD_REGNO_NREGS. */
2064 static unsigned int
2065 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2067 return rs6000_hard_regno_nregs[mode][regno];
2070 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2072 static bool
2073 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2075 return rs6000_hard_regno_mode_ok_p[mode][regno];
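/* Note (illustrative, not part of the original source): both hooks
   above are plain table lookups; the tables are filled once at
   initialization time, roughly:

     rs6000_hard_regno_mode_ok_p[m][r]
       = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);

   (see rs6000_init_hard_regno_mode_ok further down in this file).  */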
2078 /* Implement TARGET_MODES_TIEABLE_P.
2080 PTImode cannot tie with other modes because PTImode is restricted to even
2081 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2082 57744).
2084 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2085 128-bit floating point on VSX systems ties with other vectors. */
2087 static bool
2088 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2090 if (mode1 == PTImode)
2091 return mode2 == PTImode;
2092 if (mode2 == PTImode)
2093 return false;
2095 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2096 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2097 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2098 return false;
2100 if (SCALAR_FLOAT_MODE_P (mode1))
2101 return SCALAR_FLOAT_MODE_P (mode2);
2102 if (SCALAR_FLOAT_MODE_P (mode2))
2103 return false;
2105 if (GET_MODE_CLASS (mode1) == MODE_CC)
2106 return GET_MODE_CLASS (mode2) == MODE_CC;
2107 if (GET_MODE_CLASS (mode2) == MODE_CC)
2108 return false;
2110 return true;
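/* Examples derived from the rules above (illustrative, not part of the
   original source): V2DFmode ties with V4SImode (both AltiVec/VSX
   vector modes) and DFmode ties with SFmode (both scalar float), but
   TImode never ties with PTImode, and DImode does not tie with
   DFmode.  */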
2113 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2115 static bool
2116 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2117 unsigned int regno, machine_mode mode)
2119 if (TARGET_32BIT
2120 && TARGET_POWERPC64
2121 && GET_MODE_SIZE (mode) > 4
2122 && INT_REGNO_P (regno))
2123 return true;
2125 if (TARGET_VSX
2126 && FP_REGNO_P (regno)
2127 && GET_MODE_SIZE (mode) > 8
2128 && !FLOAT128_2REG_P (mode))
2129 return true;
2131 return false;
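/* Example (illustrative, not part of the original source): under
   -m32 -mpowerpc64, a DImode value in a GPR survives a call only in
   its low 32 bits, because the 32-bit ABI saves and restores just
   4 bytes per GPR; the first test above therefore reports such
   registers as partially clobbered.  */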
2134 /* Print interesting facts about registers. */
2135 static void
2136 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2138 int r, m;
2140 for (r = first_regno; r <= last_regno; ++r)
2142 const char *comma = "";
2143 int len;
2145 if (first_regno == last_regno)
2146 fprintf (stderr, "%s:\t", reg_name);
2147 else
2148 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2150 len = 8;
2151 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2152 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2154 if (len > 70)
2156 fprintf (stderr, ",\n\t");
2157 len = 8;
2158 comma = "";
2161 if (rs6000_hard_regno_nregs[m][r] > 1)
2162 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2163 rs6000_hard_regno_nregs[m][r]);
2164 else
2165 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2167 comma = ", ";
2170 if (call_used_regs[r])
2172 if (len > 70)
2174 fprintf (stderr, ",\n\t");
2175 len = 8;
2176 comma = "";
2179 len += fprintf (stderr, "%s%s", comma, "call-used");
2180 comma = ", ";
2183 if (fixed_regs[r])
2185 if (len > 70)
2187 fprintf (stderr, ",\n\t");
2188 len = 8;
2189 comma = "";
2192 len += fprintf (stderr, "%s%s", comma, "fixed");
2193 comma = ", ";
2196 if (len > 70)
2198 fprintf (stderr, ",\n\t");
2199 comma = "";
2202 len += fprintf (stderr, "%sreg-class = %s", comma,
2203 reg_class_names[(int)rs6000_regno_regclass[r]]);
2204 comma = ", ";
2206 if (len > 70)
2208 fprintf (stderr, ",\n\t");
2209 comma = "";
2212 fprintf (stderr, "%sregno = %d\n", comma, r);
2216 static const char *
2217 rs6000_debug_vector_unit (enum rs6000_vector v)
2219 const char *ret;
2221 switch (v)
2223 case VECTOR_NONE: ret = "none"; break;
2224 case VECTOR_ALTIVEC: ret = "altivec"; break;
2225 case VECTOR_VSX: ret = "vsx"; break;
2226 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2227 default: ret = "unknown"; break;
2230 return ret;
2233 /* Inner function printing just the address mask for a particular reload
2234 register class. */
2235 DEBUG_FUNCTION char *
2236 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2238 static char ret[8];
2239 char *p = ret;
2241 if ((mask & RELOAD_REG_VALID) != 0)
2242 *p++ = 'v';
2243 else if (keep_spaces)
2244 *p++ = ' ';
2246 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2247 *p++ = 'm';
2248 else if (keep_spaces)
2249 *p++ = ' ';
2251 if ((mask & RELOAD_REG_INDEXED) != 0)
2252 *p++ = 'i';
2253 else if (keep_spaces)
2254 *p++ = ' ';
2256 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2257 *p++ = 'O';
2258 else if ((mask & RELOAD_REG_OFFSET) != 0)
2259 *p++ = 'o';
2260 else if (keep_spaces)
2261 *p++ = ' ';
2263 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2264 *p++ = '+';
2265 else if (keep_spaces)
2266 *p++ = ' ';
2268 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2269 *p++ = '+';
2270 else if (keep_spaces)
2271 *p++ = ' ';
2273 if ((mask & RELOAD_REG_AND_M16) != 0)
2274 *p++ = '&';
2275 else if (keep_spaces)
2276 *p++ = ' ';
2278 *p = '\0';
2280 return ret;
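/* Example (illustrative, not part of the original source): a mask of
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET prints as
   "vio", or as "v io   " when KEEP_SPACES pads each unset flag
   position.  */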
2283 /* Print the address masks in a human readable fashion. */
2284 DEBUG_FUNCTION void
2285 rs6000_debug_print_mode (ssize_t m)
2287 ssize_t rc;
2288 int spaces = 0;
2290 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2291 for (rc = 0; rc < N_RELOAD_REG; rc++)
2292 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2293 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2295 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2296 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2298 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2299 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2300 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2301 spaces = 0;
2303 else
2304 spaces += sizeof (" Reload=sl") - 1;
2306 if (reg_addr[m].scalar_in_vmx_p)
2308 fprintf (stderr, "%*s Upper=y", spaces, "");
2309 spaces = 0;
2311 else
2312 spaces += sizeof (" Upper=y") - 1;
2314 if (rs6000_vector_unit[m] != VECTOR_NONE
2315 || rs6000_vector_mem[m] != VECTOR_NONE)
2317 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2318 spaces, "",
2319 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2320 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2323 fputs ("\n", stderr);
2326 #define DEBUG_FMT_ID "%-32s= "
2327 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2328 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2329 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2331 /* Print various interesting information with -mdebug=reg. */
2332 static void
2333 rs6000_debug_reg_global (void)
2335 static const char *const tf[2] = { "false", "true" };
2336 const char *nl = (const char *)0;
2337 int m;
2338 size_t m1, m2, v;
2339 char costly_num[20];
2340 char nop_num[20];
2341 char flags_buffer[40];
2342 const char *costly_str;
2343 const char *nop_str;
2344 const char *trace_str;
2345 const char *abi_str;
2346 const char *cmodel_str;
2347 struct cl_target_option cl_opts;
2349 /* Modes we want tieable information on. */
2350 static const machine_mode print_tieable_modes[] = {
2351 QImode,
2352 HImode,
2353 SImode,
2354 DImode,
2355 TImode,
2356 PTImode,
2357 SFmode,
2358 DFmode,
2359 TFmode,
2360 IFmode,
2361 KFmode,
2362 SDmode,
2363 DDmode,
2364 TDmode,
2365 V16QImode,
2366 V8HImode,
2367 V4SImode,
2368 V2DImode,
2369 V1TImode,
2370 V32QImode,
2371 V16HImode,
2372 V8SImode,
2373 V4DImode,
2374 V2TImode,
2375 V4SFmode,
2376 V2DFmode,
2377 V8SFmode,
2378 V4DFmode,
2379 CCmode,
2380 CCUNSmode,
2381 CCEQmode,
2384 /* Virtual regs we are interested in. */
2385 const static struct {
2386 int regno; /* register number. */
2387 const char *name; /* register name. */
2388 } virtual_regs[] = {
2389 { STACK_POINTER_REGNUM, "stack pointer:" },
2390 { TOC_REGNUM, "toc: " },
2391 { STATIC_CHAIN_REGNUM, "static chain: " },
2392 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2393 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2394 { ARG_POINTER_REGNUM, "arg pointer: " },
2395 { FRAME_POINTER_REGNUM, "frame pointer:" },
2396 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2397 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2398 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2399 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2400 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2401 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2402 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2403 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2404 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2407 fputs ("\nHard register information:\n", stderr);
2408 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2409 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2410 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2411 LAST_ALTIVEC_REGNO,
2412 "vs");
2413 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2414 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2415 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2416 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2417 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2418 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2420 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2421 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2422 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2424 fprintf (stderr,
2425 "\n"
2426 "d reg_class = %s\n"
2427 "f reg_class = %s\n"
2428 "v reg_class = %s\n"
2429 "wa reg_class = %s\n"
2430 "we reg_class = %s\n"
2431 "wr reg_class = %s\n"
2432 "wx reg_class = %s\n"
2433 "wA reg_class = %s\n"
2434 "\n",
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2440 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2441 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2442 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
2444 nl = "\n";
2445 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2446 rs6000_debug_print_mode (m);
2448 fputs ("\n", stderr);
2450 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2452 machine_mode mode1 = print_tieable_modes[m1];
2453 bool first_time = true;
2455 nl = (const char *)0;
2456 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2458 machine_mode mode2 = print_tieable_modes[m2];
2459 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2461 if (first_time)
2463 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2464 nl = "\n";
2465 first_time = false;
2468 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2472 if (!first_time)
2473 fputs ("\n", stderr);
2476 if (nl)
2477 fputs (nl, stderr);
2479 if (rs6000_recip_control)
2481 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2483 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2484 if (rs6000_recip_bits[m])
2486 fprintf (stderr,
2487 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2488 GET_MODE_NAME (m),
2489 (RS6000_RECIP_AUTO_RE_P (m)
2490 ? "auto"
2491 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2492 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2493 ? "auto"
2494 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2497 fputs ("\n", stderr);
2500 if (rs6000_cpu_index >= 0)
2502 const char *name = processor_target_table[rs6000_cpu_index].name;
2503 HOST_WIDE_INT flags
2504 = processor_target_table[rs6000_cpu_index].target_enable;
2506 sprintf (flags_buffer, "-mcpu=%s flags", name);
2507 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2509 else
2510 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2512 if (rs6000_tune_index >= 0)
2514 const char *name = processor_target_table[rs6000_tune_index].name;
2515 HOST_WIDE_INT flags
2516 = processor_target_table[rs6000_tune_index].target_enable;
2518 sprintf (flags_buffer, "-mtune=%s flags", name);
2519 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2521 else
2522 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2524 cl_target_option_save (&cl_opts, &global_options);
2525 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2526 rs6000_isa_flags);
2528 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2529 rs6000_isa_flags_explicit);
2531 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2532 rs6000_builtin_mask);
2534 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2536 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2537 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2539 switch (rs6000_sched_costly_dep)
2541 case max_dep_latency:
2542 costly_str = "max_dep_latency";
2543 break;
2545 case no_dep_costly:
2546 costly_str = "no_dep_costly";
2547 break;
2549 case all_deps_costly:
2550 costly_str = "all_deps_costly";
2551 break;
2553 case true_store_to_load_dep_costly:
2554 costly_str = "true_store_to_load_dep_costly";
2555 break;
2557 case store_to_load_dep_costly:
2558 costly_str = "store_to_load_dep_costly";
2559 break;
2561 default:
2562 costly_str = costly_num;
2563 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2564 break;
2567 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2569 switch (rs6000_sched_insert_nops)
2571 case sched_finish_regroup_exact:
2572 nop_str = "sched_finish_regroup_exact";
2573 break;
2575 case sched_finish_pad_groups:
2576 nop_str = "sched_finish_pad_groups";
2577 break;
2579 case sched_finish_none:
2580 nop_str = "sched_finish_none";
2581 break;
2583 default:
2584 nop_str = nop_num;
2585 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2586 break;
2589 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2591 switch (rs6000_sdata)
2593 default:
2594 case SDATA_NONE:
2595 break;
2597 case SDATA_DATA:
2598 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2599 break;
2601 case SDATA_SYSV:
2602 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2603 break;
2605 case SDATA_EABI:
2606 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2607 break;
2611 switch (rs6000_traceback)
2613 case traceback_default: trace_str = "default"; break;
2614 case traceback_none: trace_str = "none"; break;
2615 case traceback_part: trace_str = "part"; break;
2616 case traceback_full: trace_str = "full"; break;
2617 default: trace_str = "unknown"; break;
2620 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2622 switch (rs6000_current_cmodel)
2624 case CMODEL_SMALL: cmodel_str = "small"; break;
2625 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2626 case CMODEL_LARGE: cmodel_str = "large"; break;
2627 default: cmodel_str = "unknown"; break;
2630 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2632 switch (rs6000_current_abi)
2634 case ABI_NONE: abi_str = "none"; break;
2635 case ABI_AIX: abi_str = "aix"; break;
2636 case ABI_ELFv2: abi_str = "ELFv2"; break;
2637 case ABI_V4: abi_str = "V4"; break;
2638 case ABI_DARWIN: abi_str = "darwin"; break;
2639 default: abi_str = "unknown"; break;
2642 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2644 if (rs6000_altivec_abi)
2645 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2647 if (rs6000_darwin64_abi)
2648 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2650 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2651 (TARGET_SOFT_FLOAT ? "true" : "false"));
2653 if (TARGET_LINK_STACK)
2654 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2656 if (TARGET_P8_FUSION)
2658 char options[80];
2660 strcpy (options, "power8");
2661 if (TARGET_P8_FUSION_SIGN)
2662 strcat (options, ", sign");
2664 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2667 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2668 TARGET_SECURE_PLT ? "secure" : "bss");
2669 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2670 aix_struct_return ? "aix" : "sysv");
2671 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2672 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2673 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2674 tf[!!rs6000_align_branch_targets]);
2675 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2676 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2677 rs6000_long_double_type_size);
2678 if (rs6000_long_double_type_size > 64)
2680 fprintf (stderr, DEBUG_FMT_S, "long double type",
2681 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2682 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2683 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2685 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2686 (int)rs6000_sched_restricted_insns_priority);
2687 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2688 (int)END_BUILTINS);
2689 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2690 (int)RS6000_BUILTIN_COUNT);
2692 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2693 (int)TARGET_FLOAT128_ENABLE_TYPE);
2695 if (TARGET_VSX)
2696 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2697 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2699 if (TARGET_DIRECT_MOVE_128)
2700 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2701 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2705 /* Update the addr mask bits in reg_addr to help secondary reload and the
2706 legitimate address support figure out the appropriate addressing to
2707 use. */
2709 static void
2710 rs6000_setup_reg_addr_masks (void)
2712 ssize_t rc, reg, m, nregs;
2713 addr_mask_type any_addr_mask, addr_mask;
2715 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2717 machine_mode m2 = (machine_mode) m;
2718 bool complex_p = false;
2719 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2720 size_t msize;
2722 if (COMPLEX_MODE_P (m2))
2724 complex_p = true;
2725 m2 = GET_MODE_INNER (m2);
2728 msize = GET_MODE_SIZE (m2);
2730 /* SDmode is special in that we want to access it only via REG+REG
2731 addressing on power7 and above, since we want to use the LFIWZX and
2732 STFIWX instructions to load and store it. */
2733 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2735 any_addr_mask = 0;
2736 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2738 addr_mask = 0;
2739 reg = reload_reg_map[rc].reg;
2741 /* Can mode values go in the GPR/FPR/Altivec registers? */
2742 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2744 bool small_int_vsx_p = (small_int_p
2745 && (rc == RELOAD_REG_FPR
2746 || rc == RELOAD_REG_VMX));
2748 nregs = rs6000_hard_regno_nregs[m][reg];
2749 addr_mask |= RELOAD_REG_VALID;
2751 /* Indicate if the mode takes more than 1 physical register. If
2752 it takes a single register, indicate it can do REG+REG
2753 addressing. Small integers in VSX registers can only do
2754 REG+REG addressing. */
2755 if (small_int_vsx_p)
2756 addr_mask |= RELOAD_REG_INDEXED;
2757 else if (nregs > 1 || m == BLKmode || complex_p)
2758 addr_mask |= RELOAD_REG_MULTIPLE;
2759 else
2760 addr_mask |= RELOAD_REG_INDEXED;
2762 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2763 addressing. If we allow scalars into Altivec registers,
2764 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2766 For VSX systems, we don't allow update addressing for
2767 DFmode/SFmode if those registers can go in both the
2768 traditional floating point registers and Altivec registers.
2769 The load/store instructions for the Altivec registers do not
2770 have update forms. If we allowed update addressing, it seems
2771 to break IV-OPT code using floating point if the index type is
2772 int instead of long (PR target/81550 and target/84042). */
2774 if (TARGET_UPDATE
2775 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2776 && msize <= 8
2777 && !VECTOR_MODE_P (m2)
2778 && !FLOAT128_VECTOR_P (m2)
2779 && !complex_p
2780 && (m != E_DFmode || !TARGET_VSX)
2781 && (m != E_SFmode || !TARGET_P8_VECTOR)
2782 && !small_int_vsx_p)
2784 addr_mask |= RELOAD_REG_PRE_INCDEC;
2786 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2787 we don't allow PRE_MODIFY for some multi-register
2788 operations. */
2789 switch (m)
2791 default:
2792 addr_mask |= RELOAD_REG_PRE_MODIFY;
2793 break;
2795 case E_DImode:
2796 if (TARGET_POWERPC64)
2797 addr_mask |= RELOAD_REG_PRE_MODIFY;
2798 break;
2800 case E_DFmode:
2801 case E_DDmode:
2802 if (TARGET_HARD_FLOAT)
2803 addr_mask |= RELOAD_REG_PRE_MODIFY;
2804 break;
2809 /* GPR and FPR registers can do REG+OFFSET addressing, except
2810 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2811 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2812 if ((addr_mask != 0) && !indexed_only_p
2813 && msize <= 8
2814 && (rc == RELOAD_REG_GPR
2815 || ((msize == 8 || m2 == SFmode)
2816 && (rc == RELOAD_REG_FPR
2817 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2818 addr_mask |= RELOAD_REG_OFFSET;
2820 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2821 instructions are enabled. The offset for 128-bit VSX registers is
2822 only 12 bits. While GPRs can handle the full offset range, VSX
2823 registers can only handle the restricted range. */
2824 else if ((addr_mask != 0) && !indexed_only_p
2825 && msize == 16 && TARGET_P9_VECTOR
2826 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2827 || (m2 == TImode && TARGET_VSX)))
2829 addr_mask |= RELOAD_REG_OFFSET;
2830 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2831 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2834 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2835 addressing on 128-bit types. */
2836 if (rc == RELOAD_REG_VMX && msize == 16
2837 && (addr_mask & RELOAD_REG_VALID) != 0)
2838 addr_mask |= RELOAD_REG_AND_M16;
2840 reg_addr[m].addr_mask[rc] = addr_mask;
2841 any_addr_mask |= addr_mask;
2844 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
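/* Worked example (illustrative, not part of the original source): for
   DFmode on a 64-bit VSX target, the GPR and FPR reload classes end up
   with RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET but
   not RELOAD_REG_PRE_INCDEC, since update addressing is disabled for
   DFmode under VSX (see the PR target/81550 note above); the
   RELOAD_REG_ANY entry is simply the union over all reload classes.  */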
2849 /* Initialize the various global tables that are based on register size. */
2850 static void
2851 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2853 ssize_t r, m, c;
2854 int align64;
2855 int align32;
2857 /* Precalculate REGNO_REG_CLASS. */
2858 rs6000_regno_regclass[0] = GENERAL_REGS;
2859 for (r = 1; r < 32; ++r)
2860 rs6000_regno_regclass[r] = BASE_REGS;
2862 for (r = 32; r < 64; ++r)
2863 rs6000_regno_regclass[r] = FLOAT_REGS;
2865 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2866 rs6000_regno_regclass[r] = NO_REGS;
2868 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2869 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2871 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2872 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2873 rs6000_regno_regclass[r] = CR_REGS;
2875 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2876 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2877 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2878 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2879 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2881 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2883 /* Precalculate the mapping from register class to simpler reload register class. We don't
2884 need all of the register classes that are combinations of different
2885 classes, just the simple ones that have constraint letters. */
2886 for (c = 0; c < N_REG_CLASSES; c++)
2887 reg_class_to_reg_type[c] = NO_REG_TYPE;
2889 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2890 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2891 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2892 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2893 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2894 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2895 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2897 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2898 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2900 if (TARGET_VSX)
2902 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2903 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2905 else
2907 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2908 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2911 /* Precalculate the valid memory formats as well as the vector information;
2912 this must be set up before the rs6000_hard_regno_nregs_internal calls
2913 below. */
2914 gcc_assert ((int)VECTOR_NONE == 0);
2915 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2916 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2918 gcc_assert ((int)CODE_FOR_nothing == 0);
2919 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2921 gcc_assert ((int)NO_REGS == 0);
2922 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2924 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
2925 controls whether the compiler may use native alignment or must keep 128-bit alignment. */
2926 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2928 align64 = 64;
2929 align32 = 32;
2931 else
2933 align64 = 128;
2934 align32 = 128;
2937 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2938 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2939 if (TARGET_FLOAT128_TYPE)
2941 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2942 rs6000_vector_align[KFmode] = 128;
2944 if (FLOAT128_IEEE_P (TFmode))
2946 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2947 rs6000_vector_align[TFmode] = 128;
2951 /* V2DF mode, VSX only. */
2952 if (TARGET_VSX)
2954 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2955 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2956 rs6000_vector_align[V2DFmode] = align64;
2959 /* V4SF mode, either VSX or Altivec. */
2960 if (TARGET_VSX)
2962 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2963 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2964 rs6000_vector_align[V4SFmode] = align32;
2966 else if (TARGET_ALTIVEC)
2968 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2969 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2970 rs6000_vector_align[V4SFmode] = align32;
2973 /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
2974 and stores. */
2975 if (TARGET_ALTIVEC)
2977 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2978 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2979 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2980 rs6000_vector_align[V4SImode] = align32;
2981 rs6000_vector_align[V8HImode] = align32;
2982 rs6000_vector_align[V16QImode] = align32;
2984 if (TARGET_VSX)
2986 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2987 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2988 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2990 else
2992 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2993 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2994 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2998 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit. Allow
2999 it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3000 if (TARGET_VSX)
3002 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3003 rs6000_vector_unit[V2DImode]
3004 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3005 rs6000_vector_align[V2DImode] = align64;
3007 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3008 rs6000_vector_unit[V1TImode]
3009 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3010 rs6000_vector_align[V1TImode] = 128;
3013 /* DFmode, see if we want to use the VSX unit. Memory is handled
3014 differently, so don't set rs6000_vector_mem. */
3015 if (TARGET_VSX)
3017 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3018 rs6000_vector_align[DFmode] = 64;
3021 /* SFmode, see if we want to use the VSX unit. */
3022 if (TARGET_P8_VECTOR)
3024 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3025 rs6000_vector_align[SFmode] = 32;
3028 /* Allow TImode in VSX registers and set the VSX memory macros. */
3029 if (TARGET_VSX)
3031 rs6000_vector_mem[TImode] = VECTOR_VSX;
3032 rs6000_vector_align[TImode] = align64;
3035 /* Register class constraints for the constraints that depend on compile
3036 switches. When the VSX code was added, different constraints were added
3037 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3038 of the VSX registers are used. The register classes for scalar floating
3040 point types are set, based on whether we allow that type into the upper
3040 (Altivec) registers. GCC has register classes to target the Altivec
3041 registers for load/store operations, to select using a VSX memory
3042 operation instead of the traditional floating point operation. The
3043 constraints are:
3045 d - Register class to use with traditional DFmode instructions.
3046 f - Register class to use with traditional SFmode instructions.
3047 v - Altivec register.
3048 wa - Any VSX register.
3049 wc - Reserved to represent individual CR bits (used in LLVM).
3050 wn - always NO_REGS.
3051 wr - GPR if 64-bit mode is permitted.
3052 wx - Float register if we can do 32-bit int stores. */
3054 if (TARGET_HARD_FLOAT)
3056 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3057 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3060 if (TARGET_VSX)
3061 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3063 /* Add conditional constraints based on various options, to allow us to
3064 collapse multiple insn patterns. */
3065 if (TARGET_ALTIVEC)
3066 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3068 if (TARGET_POWERPC64)
3070 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3071 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3074 if (TARGET_STFIWX)
3075 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3077 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3078 if (TARGET_DIRECT_MOVE_128)
3079 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3081 /* Set up the reload helper and direct move functions. */
3082 if (TARGET_VSX || TARGET_ALTIVEC)
3084 if (TARGET_64BIT)
3086 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3087 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3088 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3089 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3090 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3091 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3092 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3093 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3094 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3095 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3096 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3097 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3098 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3099 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3100 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3101 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3102 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3103 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3104 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3105 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3107 if (FLOAT128_VECTOR_P (KFmode))
3109 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3110 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3113 if (FLOAT128_VECTOR_P (TFmode))
3115 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3116 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3119 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3120 available. */
3121 if (TARGET_NO_SDMODE_STACK)
3123 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3124 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3127 if (TARGET_VSX)
3129 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3130 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3133 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3135 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3136 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3137 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3138 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3139 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3140 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3141 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3142 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3143 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3145 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3146 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3147 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3148 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3149 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3150 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3151 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3152 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3153 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3155 if (FLOAT128_VECTOR_P (KFmode))
3157 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3158 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3161 if (FLOAT128_VECTOR_P (TFmode))
3163 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3164 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3168 else
3170 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3171 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3172 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3173 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3174 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3175 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3176 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3177 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3178 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3179 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3180 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3181 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3182 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3183 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3184 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3185 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3186 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3187 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3188 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3189 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3191 if (FLOAT128_VECTOR_P (KFmode))
3193 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3194 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3197 if (FLOAT128_IEEE_P (TFmode))
3199 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3200 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3203 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3204 available. */
3205 if (TARGET_NO_SDMODE_STACK)
3207 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3208 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3211 if (TARGET_VSX)
3213 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3214 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3217 if (TARGET_DIRECT_MOVE)
3219 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3220 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3221 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3225 reg_addr[DFmode].scalar_in_vmx_p = true;
3226 reg_addr[DImode].scalar_in_vmx_p = true;
3228 if (TARGET_P8_VECTOR)
3230 reg_addr[SFmode].scalar_in_vmx_p = true;
3231 reg_addr[SImode].scalar_in_vmx_p = true;
3233 if (TARGET_P9_VECTOR)
3235 reg_addr[HImode].scalar_in_vmx_p = true;
3236 reg_addr[QImode].scalar_in_vmx_p = true;
3241 /* Precalculate HARD_REGNO_NREGS. */
3242 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3243 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3244 rs6000_hard_regno_nregs[m][r]
3245 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3247 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3248 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3249 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3250 rs6000_hard_regno_mode_ok_p[m][r]
3251 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3253 /* Precalculate CLASS_MAX_NREGS sizes. */
3254 for (c = 0; c < LIM_REG_CLASSES; ++c)
3256 int reg_size;
3258 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3259 reg_size = UNITS_PER_VSX_WORD;
3261 else if (c == ALTIVEC_REGS)
3262 reg_size = UNITS_PER_ALTIVEC_WORD;
3264 else if (c == FLOAT_REGS)
3265 reg_size = UNITS_PER_FP_WORD;
3267 else
3268 reg_size = UNITS_PER_WORD;
3270 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3272 machine_mode m2 = (machine_mode)m;
3273 int reg_size2 = reg_size;
3275 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3276 in VSX. */
3277 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3278 reg_size2 = UNITS_PER_FP_WORD;
3280 rs6000_class_max_nregs[m][c]
3281 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
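/* Worked example (illustrative, not part of the original source):
   IFmode (IBM 128-bit floating point) in a VSX register class is
   FLOAT128_2REG_P, so reg_size2 is forced to UNITS_PER_FP_WORD (8) and
   the entry becomes (16 + 8 - 1) / 8 == 2 registers.  */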
3285 /* Calculate the modes for which to automatically generate code using the
3286 reciprocal divide and square root instructions. In the future, possibly
3287 automatically generate the instructions even if the user did not specify
3288 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3289 not accurate enough. */
3290 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3291 if (TARGET_FRES)
3292 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3293 if (TARGET_FRE)
3294 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3295 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3296 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3297 if (VECTOR_UNIT_VSX_P (V2DFmode))
3298 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3300 if (TARGET_FRSQRTES)
3301 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3302 if (TARGET_FRSQRTE)
3303 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3304 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3305 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3306 if (VECTOR_UNIT_VSX_P (V2DFmode))
3307 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3309 if (rs6000_recip_control)
3311 if (!flag_finite_math_only)
3312 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3313 "-ffast-math");
3314 if (flag_trapping_math)
3315 warning (0, "%qs requires %qs or %qs", "-mrecip",
3316 "-fno-trapping-math", "-ffast-math");
3317 if (!flag_reciprocal_math)
3318 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3319 "-ffast-math");
3320 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3322 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3323 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3324 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3326 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3327 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3328 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3330 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3331 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3332 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3334 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3335 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3336 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3338 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3339 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3340 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3342 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3343 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3344 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3346 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3347 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3348 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3350 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3351 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3352 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
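/* Summary (illustrative, not part of the original source): the HAVE_*
   bits above record what the hardware offers, while the AUTO_* bits
   are added only when the fast-math conditions hold, so plain -mrecip
   without -ffast-math only warns and enables no automatic reciprocal
   code generation.  */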
3356 /* Update the addr mask bits in reg_addr to help secondary reload and the
3357 legitimate address support figure out the appropriate addressing to
3358 use. */
3359 rs6000_setup_reg_addr_masks ();
3361 if (global_init_p || TARGET_DEBUG_TARGET)
3363 if (TARGET_DEBUG_REG)
3364 rs6000_debug_reg_global ();
3366 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3367 fprintf (stderr,
3368 "SImode variable mult cost = %d\n"
3369 "SImode constant mult cost = %d\n"
3370 "SImode short constant mult cost = %d\n"
3371 "DImode multipliciation cost = %d\n"
3372 "SImode division cost = %d\n"
3373 "DImode division cost = %d\n"
3374 "Simple fp operation cost = %d\n"
3375 "DFmode multiplication cost = %d\n"
3376 "SFmode division cost = %d\n"
3377 "DFmode division cost = %d\n"
3378 "cache line size = %d\n"
3379 "l1 cache size = %d\n"
3380 "l2 cache size = %d\n"
3381 "simultaneous prefetches = %d\n"
3382 "\n",
3383 rs6000_cost->mulsi,
3384 rs6000_cost->mulsi_const,
3385 rs6000_cost->mulsi_const9,
3386 rs6000_cost->muldi,
3387 rs6000_cost->divsi,
3388 rs6000_cost->divdi,
3389 rs6000_cost->fp,
3390 rs6000_cost->dmul,
3391 rs6000_cost->sdiv,
3392 rs6000_cost->ddiv,
3393 rs6000_cost->cache_line_size,
3394 rs6000_cost->l1_cache_size,
3395 rs6000_cost->l2_cache_size,
3396 rs6000_cost->simultaneous_prefetches);
3400 #if TARGET_MACHO
3401 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3403 static void
3404 darwin_rs6000_override_options (void)
3406 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3407 off. */
3408 rs6000_altivec_abi = 1;
3409 TARGET_ALTIVEC_VRSAVE = 1;
3410 rs6000_current_abi = ABI_DARWIN;
3412 if (DEFAULT_ABI == ABI_DARWIN
3413 && TARGET_64BIT)
3414 darwin_one_byte_bool = 1;
3416 if (TARGET_64BIT && ! TARGET_POWERPC64)
3418 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3419 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3422 /* The linkers [ld64] that support 64-bit do not need the JBSR longcall
3423 optimisation, and it will not work with the most generic case (where the
3424 symbol is undefined external, but there is no symbol stub). */
3425 if (TARGET_64BIT)
3426 rs6000_default_long_calls = 0;
3428 /* ld_classic is (so far) still used for kernel (static) code, and supports
3429 the JBSR longcall / branch islands. */
3430 if (flag_mkernel)
3432 rs6000_default_long_calls = 1;
3434 /* Allow a kext author to do -mkernel -mhard-float. */
3435 if (! (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT))
3436 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3439 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3440 Altivec. */
3441 if (!flag_mkernel && !flag_apple_kext
3442 && TARGET_64BIT
3443 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3444 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3446 /* Unless the user (not the configurer) has explicitly overridden
3447 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3448 G4 unless targeting the kernel. */
3449 if (!flag_mkernel
3450 && !flag_apple_kext
3451 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3452 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3453 && ! global_options_set.x_rs6000_cpu_index)
3455 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3458 #endif
3460 /* If not otherwise specified by a target, make 'long double' equivalent to
3461 'double'. */
3463 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3464 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3465 #endif
3467 /* Return the builtin mask of the various options used that could affect which
3468 builtins are enabled. In the past we used target_flags, but we've run out of
3469 bits, and some options are no longer in target_flags. */
3471 HOST_WIDE_INT
3472 rs6000_builtin_mask_calculate (void)
3474 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3475 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3476 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3477 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3478 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3479 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3480 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3481 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3482 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3483 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3484 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3485 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3486 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3487 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3488 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3489 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3490 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3491 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3492 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3493 | ((TARGET_LONG_DOUBLE_128
3494 && TARGET_HARD_FLOAT
3495 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3496 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3497 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
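/* Illustration (assumed flag composition, not text from the original
file): compiling with -mcpu=power9 on a 64-bit Linux target would
typically yield a mask containing at least RS6000_BTM_ALTIVEC
| RS6000_BTM_VSX | RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT, since
-mcpu=power9 enables those ISA features. */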
3500 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3501 to clobber the XER[CA] bit because clobbering that bit without telling
3502 the compiler worked just fine with versions of GCC before GCC 5, and
3503 breaking a lot of older code in ways that are hard to track down is
3504 not such a great idea. */
3506 static rtx_insn *
3507 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3508 vec<const char *> &/*constraints*/,
3509 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3511 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3512 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3513 return NULL;
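/* Illustration (hypothetical user code, not part of this file):

asm ("addic %0,%1,-1" : "=r" (t) : "r" (x));

"addic" updates XER[CA] without the asm declaring it; the hook above
appends that clobber so such pre-GCC-5 code keeps working. */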
3516 /* Override command line options.
3518 Combine build-specific configuration information with options
3519 specified on the command line to set various state variables which
3520 influence code generation, optimization, and expansion of built-in
3521 functions. Assure that command-line configuration preferences are
3522 compatible with each other and with the build configuration; issue
3523 warnings while adjusting configuration or error messages while
3524 rejecting configuration.
3526 Upon entry to this function:
3528 This function is called once at the beginning of
3529 compilation, and then again at the start and end of compiling
3530 each section of code that has a different configuration, as
3531 indicated, for example, by adding the
3533 __attribute__((__target__("cpu=power9")))
3535 qualifier to a function definition or, for example, by bracketing
3536 code between
3538 #pragma GCC target("altivec")
3542 #pragma GCC reset_options
3544 directives. Parameter global_init_p is true for the initial
3545 invocation, which initializes global variables, and false for all
3546 subsequent invocations.
3549 Various global state information is assumed to be valid. This
3550 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3551 default CPU specified at build configure time, TARGET_DEFAULT,
3552 representing the default set of option flags for the default
3553 target, and global_options_set.x_rs6000_isa_flags, representing
3554 which options were requested on the command line.
3556 Upon return from this function:
3558 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3559 was set by name on the command line. Additionally, if certain
3560 attributes are automatically enabled or disabled by this function
3561 in order to assure compatibility between options and
3562 configuration, the flags associated with those attributes are
3563 also set. By setting these "explicit bits", we avoid the risk
3564 that other code might accidentally overwrite these particular
3565 attributes with "default values".
3567 The various bits of rs6000_isa_flags are set to indicate the
3568 target options that have been selected for the most current
3569 compilation efforts. This has the effect of also turning on the
3570 associated TARGET_XXX values since these are macros which are
3571 generally defined to test the corresponding bit of the
3572 rs6000_isa_flags variable.
3574 The variable rs6000_builtin_mask is set to represent the target
3575 options for the most current compilation efforts, consistent with
3576 the current contents of rs6000_isa_flags. This variable controls
3577 expansion of built-in functions.
3579 Various other global variables and fields of global structures
3580 (over 50 in all) are initialized to reflect the desired options
3581 for the most current compilation efforts. */
3583 static bool
3584 rs6000_option_override_internal (bool global_init_p)
3586 bool ret = true;
3588 HOST_WIDE_INT set_masks;
3589 HOST_WIDE_INT ignore_masks;
3590 int cpu_index = -1;
3591 int tune_index;
3592 struct cl_target_option *main_target_opt
3593 = ((global_init_p || target_option_default_node == NULL)
3594 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3596 /* Print defaults. */
3597 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3598 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3600 /* Remember the explicit arguments. */
3601 if (global_init_p)
3602 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3604 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3605 library functions, so warn about it. The flag may be useful for
3606 performance studies from time to time though, so don't disable it
3607 entirely. */
3608 if (global_options_set.x_rs6000_alignment_flags
3609 && rs6000_alignment_flags == MASK_ALIGN_POWER
3610 && DEFAULT_ABI == ABI_DARWIN
3611 && TARGET_64BIT)
3612 warning (0, "%qs is not supported for 64-bit Darwin;"
3613 " it is incompatible with the installed C and C++ libraries",
3614 "-malign-power");
3616 /* Numerous experiments show that IRA-based loop pressure
3617 calculation works better for RTL loop invariant motion on targets
3618 with enough (>= 32) registers. It is an expensive optimization,
3619 so it is on only for peak performance. */
3620 if (optimize >= 3 && global_init_p
3621 && !global_options_set.x_flag_ira_loop_pressure)
3622 flag_ira_loop_pressure = 1;
3624 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3625 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3626 options were already specified. */
3627 if (flag_sanitize & SANITIZE_USER_ADDRESS
3628 && !global_options_set.x_flag_asynchronous_unwind_tables)
3629 flag_asynchronous_unwind_tables = 1;
3631 /* -fvariable-expansion-in-unroller is a win for POWER whenever the
3632 loop unroller is active. It is only checked during unrolling, so
3633 we can just set it on by default. */
3634 if (!global_options_set.x_flag_variable_expansion_in_unroller)
3635 flag_variable_expansion_in_unroller = 1;
3637 /* Set the pointer size. */
3638 if (TARGET_64BIT)
3640 rs6000_pmode = DImode;
3641 rs6000_pointer_size = 64;
3643 else
3645 rs6000_pmode = SImode;
3646 rs6000_pointer_size = 32;
3649 /* Some OSs don't support saving the high part of 64-bit registers on context
3650 switch. Other OSs don't support saving Altivec registers. On those OSs,
3651 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3652 if the user wants either, the user must explicitly specify them and we
3653 won't interfere with the user's specification. */
3655 set_masks = POWERPC_MASKS;
3656 #ifdef OS_MISSING_POWERPC64
3657 if (OS_MISSING_POWERPC64)
3658 set_masks &= ~OPTION_MASK_POWERPC64;
3659 #endif
3660 #ifdef OS_MISSING_ALTIVEC
3661 if (OS_MISSING_ALTIVEC)
3662 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3663 | OTHER_VSX_VECTOR_MASKS);
3664 #endif
3666 /* Don't let the processor default override options given explicitly. */
3667 set_masks &= ~rs6000_isa_flags_explicit;
3669 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3670 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3672 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3673 the cpu in a target attribute or pragma, but did not specify a tuning
3674 option, use the cpu for the tuning option rather than the option specified
3675 with -mtune on the command line. Process a '--with-cpu' configuration
3676 request as an implicit --cpu. */
3677 if (rs6000_cpu_index >= 0)
3678 cpu_index = rs6000_cpu_index;
3679 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3680 cpu_index = main_target_opt->x_rs6000_cpu_index;
3681 else if (OPTION_TARGET_CPU_DEFAULT)
3682 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3684 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3685 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3686 with those from the cpu, except for options that were explicitly set. If
3687 we don't have a cpu, do not override the target bits set in
3688 TARGET_DEFAULT. */
3689 if (cpu_index >= 0)
3691 rs6000_cpu_index = cpu_index;
3692 rs6000_isa_flags &= ~set_masks;
3693 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3694 & set_masks);
3696 else
3698 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3699 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3700 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3701 to using rs6000_isa_flags, we need to do the initialization here.
3703 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3704 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3705 HOST_WIDE_INT flags;
3706 if (TARGET_DEFAULT)
3707 flags = TARGET_DEFAULT;
3708 else
3710 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3711 const char *default_cpu = (!TARGET_POWERPC64
3712 ? "powerpc"
3713 : (BYTES_BIG_ENDIAN
3714 ? "powerpc64"
3715 : "powerpc64le"));
3716 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3717 flags = processor_target_table[default_cpu_index].target_enable;
3719 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3722 if (rs6000_tune_index >= 0)
3723 tune_index = rs6000_tune_index;
3724 else if (cpu_index >= 0)
3725 rs6000_tune_index = tune_index = cpu_index;
3726 else
3728 size_t i;
3729 enum processor_type tune_proc
3730 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3732 tune_index = -1;
3733 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3734 if (processor_target_table[i].processor == tune_proc)
3736 tune_index = i;
3737 break;
3741 if (cpu_index >= 0)
3742 rs6000_cpu = processor_target_table[cpu_index].processor;
3743 else
3744 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3746 gcc_assert (tune_index >= 0);
3747 rs6000_tune = processor_target_table[tune_index].processor;
3749 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3750 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3751 || rs6000_cpu == PROCESSOR_PPCE5500)
3753 if (TARGET_ALTIVEC)
3754 error ("AltiVec not supported in this target");
3757 /* If we are optimizing big endian systems for space, use the load/store
3758 multiple instructions. */
3759 if (BYTES_BIG_ENDIAN && optimize_size)
3760 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3762 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3763 because the hardware doesn't support the instructions used in little
3764 endian mode, and they cause an alignment trap. The 750 does not cause an
3765 alignment trap (except when the target is unaligned). */
3767 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3769 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3770 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3771 warning (0, "%qs is not supported on little endian systems",
3772 "-mmultiple");
3775 /* If little-endian, default to -mstrict-align on older processors.
3776 Testing for htm matches power8 and later. */
3777 if (!BYTES_BIG_ENDIAN
3778 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3779 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3781 if (!rs6000_fold_gimple)
3782 fprintf (stderr,
3783 "gimple folding of rs6000 builtins has been disabled.\n");
3785 /* Add some warnings for VSX. */
3786 if (TARGET_VSX)
3788 const char *msg = NULL;
3789 if (!TARGET_HARD_FLOAT)
3791 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3792 msg = N_("%<-mvsx%> requires hardware floating point");
3793 else
3795 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3796 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3799 else if (TARGET_AVOID_XFORM > 0)
3800 msg = N_("%<-mvsx%> needs indexed addressing");
3801 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3802 & OPTION_MASK_ALTIVEC))
3804 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3805 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3806 else
3807 msg = N_("%<-mno-altivec%> disables vsx");
3810 if (msg)
3812 warning (0, msg);
3813 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3814 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3818 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3819 the -mcpu setting to enable options that conflict. */
3820 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3821 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3822 | OPTION_MASK_ALTIVEC
3823 | OPTION_MASK_VSX)) != 0)
3824 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3825 | OPTION_MASK_DIRECT_MOVE)
3826 & ~rs6000_isa_flags_explicit);
3828 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3829 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3831 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3832 off all of the options that depend on those flags. */
3833 ignore_masks = rs6000_disable_incompatible_switches ();
3835 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3836 unless the user explicitly used the -mno-<option> to disable the code. */
3837 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3838 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3839 else if (TARGET_P9_MINMAX)
3841 if (cpu_index >= 0)
3843 if (cpu_index == PROCESSOR_POWER9)
3845 /* legacy behavior: allow -mcpu=power9 with certain
3846 capabilities explicitly disabled. */
3847 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3849 else
3850 error ("power9 target option is incompatible with %<%s=<xxx>%> "
3851 "for <xxx> less than power9", "-mcpu");
3853 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
3854 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
3855 & rs6000_isa_flags_explicit))
3856 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
3857 were explicitly cleared. */
3858 error ("%qs incompatible with explicitly disabled options",
3859 "-mpower9-minmax");
3860 else
3861 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
3863 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3864 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
3865 else if (TARGET_VSX)
3866 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
3867 else if (TARGET_POPCNTD)
3868 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
3869 else if (TARGET_DFP)
3870 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
3871 else if (TARGET_CMPB)
3872 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
3873 else if (TARGET_FPRND)
3874 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
3875 else if (TARGET_POPCNTB)
3876 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
3877 else if (TARGET_ALTIVEC)
3878 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
3880 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3882 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3883 error ("%qs requires %qs", "-mcrypto", "-maltivec");
3884 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3887 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3889 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3890 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
3891 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3894 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3896 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3897 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
3898 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3901 if (TARGET_P8_VECTOR && !TARGET_VSX)
3903 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3904 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
3905 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
3906 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
3908 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3909 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3910 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
3912 else
3914 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
3915 not explicit. */
3916 rs6000_isa_flags |= OPTION_MASK_VSX;
3917 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3921 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3923 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3924 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
3925 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3928 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3929 silently turn off quad memory mode. */
3930 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3932 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3933 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
3935 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3936 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
3938 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3939 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3942 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3943 the words are reversed, but atomic operations can still be done by
3944 swapping the words. */
3945 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3947 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3948 warning (0, N_("%<-mquad-memory%> is not available in little endian "
3949 "mode"));
3951 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3954 /* Assume that if the user asked for normal quad memory instructions, they
3955 want the atomic versions as well, unless they explicitly told us not to use
3956 quad word atomic instructions. */
3957 if (TARGET_QUAD_MEMORY
3958 && !TARGET_QUAD_MEMORY_ATOMIC
3959 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3960 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3962 /* If we can shrink-wrap the TOC register save separately, then use
3963 -msave-toc-indirect unless explicitly disabled. */
3964 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
3965 && flag_shrink_wrap_separate
3966 && optimize_function_for_speed_p (cfun))
3967 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
3969 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3970 generating power8 instructions. Power9 does not optimize power8 fusion
3971 cases. */
3972 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3974 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
3975 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3976 else
3977 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3980 /* Setting additional fusion flags turns on base fusion. */
3981 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
3983 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
3985 if (TARGET_P8_FUSION_SIGN)
3986 error ("%qs requires %qs", "-mpower8-fusion-sign",
3987 "-mpower8-fusion");
3989 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3991 else
3992 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3995 /* Power8 does not fuse sign extended loads with the addis. If we are
3996 optimizing at high levels for speed, convert a sign extended load into a
3997 zero extending load, and an explicit sign extension. */
3998 if (TARGET_P8_FUSION
3999 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4000 && optimize_function_for_speed_p (cfun)
4001 && optimize >= 3)
4002 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4004 /* ISA 3.0 vector instructions include ISA 2.07. */
4005 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4007 /* We prefer to not mention undocumented options in
4008 error messages. However, if users have managed to select
4009 power9-vector without selecting power8-vector, they
4010 already know about undocumented flags. */
4011 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4012 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4013 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4014 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4016 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4017 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4018 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4020 else
4022 /* OPTION_MASK_P9_VECTOR is explicit and
4023 OPTION_MASK_P8_VECTOR is not explicit. */
4024 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4025 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4029 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4030 support. If we only have ISA 2.06 support, and the user did not specify
4031 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4032 but we don't enable the full vectorization support. */
4033 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4034 TARGET_ALLOW_MOVMISALIGN = 1;
4036 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4038 if (TARGET_ALLOW_MOVMISALIGN > 0
4039 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4040 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4042 TARGET_ALLOW_MOVMISALIGN = 0;
4045 /* Determine when unaligned vector accesses are permitted, and when
4046 they are preferred over masked Altivec loads. Note that if
4047 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4048 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4049 not true. */
4050 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4052 if (!TARGET_VSX)
4054 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4055 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4057 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4060 else if (!TARGET_ALLOW_MOVMISALIGN)
4062 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4063 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4064 "-mallow-movmisalign");
4066 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4070 /* Use long double size to select the appropriate long double. We use
4071 TYPE_PRECISION to differentiate the 3 different long double types. We map
4072 128 into the precision used for TFmode. */
4073 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4074 ? 64
4075 : FLOAT_PRECISION_TFmode);
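/* For reference (summarizing the options handled below, not text from
the original file):
-mlong-double-64                         long double == double
-mlong-double-128 -mabi=ibmlongdouble    IBM extended double-double
-mlong-double-128 -mabi=ieeelongdouble   IEEE binary128 */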
4077 /* Set long double size before the IEEE 128-bit tests. */
4078 if (!global_options_set.x_rs6000_long_double_type_size)
4080 if (main_target_opt != NULL
4081 && (main_target_opt->x_rs6000_long_double_type_size
4082 != default_long_double_size))
4083 error ("target attribute or pragma changes %<long double%> size");
4084 else
4085 rs6000_long_double_type_size = default_long_double_size;
4087 else if (rs6000_long_double_type_size == 128)
4088 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4089 else if (global_options_set.x_rs6000_ieeequad)
4091 if (global_options.x_rs6000_ieeequad)
4092 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4093 else
4094 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4097 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4098 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4099 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4100 those systems will not pick up this default. Warn if the user changes the
4101 default unless -Wno-psabi. */
4102 if (!global_options_set.x_rs6000_ieeequad)
4103 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4105 else
4107 if (global_options.x_rs6000_ieeequad
4108 && (!TARGET_POPCNTD || !TARGET_VSX))
4109 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4111 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4113 static bool warned_change_long_double;
4114 if (!warned_change_long_double)
4116 warned_change_long_double = true;
4117 if (TARGET_IEEEQUAD)
4118 warning (OPT_Wpsabi, "Using IEEE extended precision "
4119 "%<long double%>");
4120 else
4121 warning (OPT_Wpsabi, "Using IBM extended precision "
4122 "%<long double%>");
4127 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4128 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4129 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4130 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4131 the keyword as well as the type. */
4132 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4134 /* IEEE 128-bit floating point requires VSX support. */
4135 if (TARGET_FLOAT128_KEYWORD)
4137 if (!TARGET_VSX)
4139 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4140 error ("%qs requires VSX support", "%<-mfloat128%>");
4142 TARGET_FLOAT128_TYPE = 0;
4143 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4144 | OPTION_MASK_FLOAT128_HW);
4146 else if (!TARGET_FLOAT128_TYPE)
4148 TARGET_FLOAT128_TYPE = 1;
4149 warning (0, "The %<-mfloat128%> option may not be fully supported");
4153 /* Enable the __float128 keyword under Linux by default. */
4154 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4155 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4156 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4158 /* If we are supporting the float128 type and have full ISA 3.0 support,
4159 enable -mfloat128-hardware by default. However, don't enable it if
4160 -mfloat128-hardware was explicitly turned off. 64-bit mode is needed
4161 because sometimes the compiler wants to put things in an integer
4162 container, and if we don't have __int128 support, it is impossible. */
4163 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4164 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4165 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4166 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4168 if (TARGET_FLOAT128_HW
4169 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4171 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4172 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4174 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4177 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4179 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4180 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4182 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4185 /* -mprefixed-addr (and hence -mpcrel) requires -mcpu=future. */
4186 if (TARGET_PREFIXED_ADDR && !TARGET_FUTURE)
4188 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4189 error ("%qs requires %qs", "-mpcrel", "-mcpu=future");
4190 else if ((rs6000_isa_flags_explicit & OPTION_MASK_PREFIXED_ADDR) != 0)
4191 error ("%qs requires %qs", "-mprefixed-addr", "-mcpu=future");
4193 rs6000_isa_flags &= ~(OPTION_MASK_PCREL | OPTION_MASK_PREFIXED_ADDR);
4196 /* -mpcrel requires prefixed load/store addressing. */
4197 if (TARGET_PCREL && !TARGET_PREFIXED_ADDR)
4199 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4200 error ("%qs requires %qs", "-mpcrel", "-mprefixed-addr");
4202 rs6000_isa_flags &= ~OPTION_MASK_PCREL;
4205 /* Print the options after updating the defaults. */
4206 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4207 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4209 /* E500mc does "better" if we inline more aggressively. Respect the
4210 user's opinion, though. */
4211 if (rs6000_block_move_inline_limit == 0
4212 && (rs6000_tune == PROCESSOR_PPCE500MC
4213 || rs6000_tune == PROCESSOR_PPCE500MC64
4214 || rs6000_tune == PROCESSOR_PPCE5500
4215 || rs6000_tune == PROCESSOR_PPCE6500))
4216 rs6000_block_move_inline_limit = 128;
4218 /* store_one_arg depends on expand_block_move to handle at least the
4219 size of reg_parm_stack_space. */
4220 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4221 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4223 if (global_init_p)
4225 /* If the appropriate debug option is enabled, replace the target hooks
4226 with debug versions that call the real version and then print
4227 debugging information. */
4228 if (TARGET_DEBUG_COST)
4230 targetm.rtx_costs = rs6000_debug_rtx_costs;
4231 targetm.address_cost = rs6000_debug_address_cost;
4232 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4235 if (TARGET_DEBUG_ADDR)
4237 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4238 targetm.legitimize_address = rs6000_debug_legitimize_address;
4239 rs6000_secondary_reload_class_ptr
4240 = rs6000_debug_secondary_reload_class;
4241 targetm.secondary_memory_needed
4242 = rs6000_debug_secondary_memory_needed;
4243 targetm.can_change_mode_class
4244 = rs6000_debug_can_change_mode_class;
4245 rs6000_preferred_reload_class_ptr
4246 = rs6000_debug_preferred_reload_class;
4247 rs6000_mode_dependent_address_ptr
4248 = rs6000_debug_mode_dependent_address;
4251 if (rs6000_veclibabi_name)
4253 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4254 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4255 else
4257 error ("unknown vectorization library ABI type (%qs) for "
4258 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4259 ret = false;
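/* For example (illustrative, under the assumptions sketched here):
with -mveclibabi=mass and unsafe math enabled, vectorized calls to
libm routines may be redirected to their MASS library equivalents
via rs6000_builtin_vectorized_libmass below. */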
4264 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4265 target attribute or pragma which automatically enables both options,
4266 unless the altivec ABI was set. This is set by default for 64-bit, but
4267 not for 32-bit. */
4268 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4270 TARGET_FLOAT128_TYPE = 0;
4271 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4272 | OPTION_MASK_FLOAT128_KEYWORD)
4273 & ~rs6000_isa_flags_explicit);
4276 /* Enable Altivec ABI for AIX -maltivec. */
4277 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4279 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4280 error ("target attribute or pragma changes AltiVec ABI");
4281 else
4282 rs6000_altivec_abi = 1;
4285 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4286 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4287 be explicitly overridden in either case. */
4288 if (TARGET_ELF)
4290 if (!global_options_set.x_rs6000_altivec_abi
4291 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4293 if (main_target_opt != NULL &&
4294 !main_target_opt->x_rs6000_altivec_abi)
4295 error ("target attribute or pragma changes AltiVec ABI");
4296 else
4297 rs6000_altivec_abi = 1;
4301 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4302 So far, the only darwin64 targets are also MACH-O. */
4303 if (TARGET_MACHO
4304 && DEFAULT_ABI == ABI_DARWIN
4305 && TARGET_64BIT)
4307 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4308 error ("target attribute or pragma changes darwin64 ABI");
4309 else
4311 rs6000_darwin64_abi = 1;
4312 /* Default to natural alignment, for better performance. */
4313 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4317 /* Place FP constants in the constant pool instead of the TOC
4318 if section anchors are enabled. */
4319 if (flag_section_anchors
4320 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4321 TARGET_NO_FP_IN_TOC = 1;
4323 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4324 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4326 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4327 SUBTARGET_OVERRIDE_OPTIONS;
4328 #endif
4329 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4330 SUBSUBTARGET_OVERRIDE_OPTIONS;
4331 #endif
4332 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4333 SUB3TARGET_OVERRIDE_OPTIONS;
4334 #endif
4336 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4337 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4339 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4340 && rs6000_tune != PROCESSOR_POWER5
4341 && rs6000_tune != PROCESSOR_POWER6
4342 && rs6000_tune != PROCESSOR_POWER7
4343 && rs6000_tune != PROCESSOR_POWER8
4344 && rs6000_tune != PROCESSOR_POWER9
4345 && rs6000_tune != PROCESSOR_FUTURE
4346 && rs6000_tune != PROCESSOR_PPCA2
4347 && rs6000_tune != PROCESSOR_CELL
4348 && rs6000_tune != PROCESSOR_PPC476);
4349 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4350 || rs6000_tune == PROCESSOR_POWER5
4351 || rs6000_tune == PROCESSOR_POWER7
4352 || rs6000_tune == PROCESSOR_POWER8);
4353 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4354 || rs6000_tune == PROCESSOR_POWER5
4355 || rs6000_tune == PROCESSOR_POWER6
4356 || rs6000_tune == PROCESSOR_POWER7
4357 || rs6000_tune == PROCESSOR_POWER8
4358 || rs6000_tune == PROCESSOR_POWER9
4359 || rs6000_tune == PROCESSOR_FUTURE
4360 || rs6000_tune == PROCESSOR_PPCE500MC
4361 || rs6000_tune == PROCESSOR_PPCE500MC64
4362 || rs6000_tune == PROCESSOR_PPCE5500
4363 || rs6000_tune == PROCESSOR_PPCE6500);
4365 /* Allow debug switches to override the above settings. These are set to -1
4366 in rs6000.opt to indicate the user hasn't directly set the switch. */
4367 if (TARGET_ALWAYS_HINT >= 0)
4368 rs6000_always_hint = TARGET_ALWAYS_HINT;
4370 if (TARGET_SCHED_GROUPS >= 0)
4371 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4373 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4374 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4376 rs6000_sched_restricted_insns_priority
4377 = (rs6000_sched_groups ? 1 : 0);
4379 /* Handle -msched-costly-dep option. */
4380 rs6000_sched_costly_dep
4381 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4383 if (rs6000_sched_costly_dep_str)
4385 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4386 rs6000_sched_costly_dep = no_dep_costly;
4387 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4388 rs6000_sched_costly_dep = all_deps_costly;
4389 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4390 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4391 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4392 rs6000_sched_costly_dep = store_to_load_dep_costly;
4393 else
4394 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4395 atoi (rs6000_sched_costly_dep_str));
4398 /* Handle -minsert-sched-nops option. */
4399 rs6000_sched_insert_nops
4400 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4402 if (rs6000_sched_insert_nops_str)
4404 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4405 rs6000_sched_insert_nops = sched_finish_none;
4406 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4407 rs6000_sched_insert_nops = sched_finish_pad_groups;
4408 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4409 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4410 else
4411 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4412 atoi (rs6000_sched_insert_nops_str));
4415 /* Handle the stack protector. */
4416 if (!global_options_set.x_rs6000_stack_protector_guard)
4417 #ifdef TARGET_THREAD_SSP_OFFSET
4418 rs6000_stack_protector_guard = SSP_TLS;
4419 #else
4420 rs6000_stack_protector_guard = SSP_GLOBAL;
4421 #endif
4423 #ifdef TARGET_THREAD_SSP_OFFSET
4424 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4425 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4426 #endif
4428 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4430 char *endp;
4431 const char *str = rs6000_stack_protector_guard_offset_str;
4433 errno = 0;
4434 long offset = strtol (str, &endp, 0);
4435 if (!*str || *endp || errno)
4436 error ("%qs is not a valid number in %qs", str,
4437 "-mstack-protector-guard-offset=");
4439 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4440 || (TARGET_64BIT && (offset & 3)))
4441 error ("%qs is not a valid offset in %qs", str,
4442 "-mstack-protector-guard-offset=");
4444 rs6000_stack_protector_guard_offset = offset;
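/* Hypothetical example (values invented for illustration): with
-mstack-protector-guard=tls -mstack-protector-guard-offset=0x28
on a 64-bit target, 0x28 is within [-0x8000, 0x7fff] and is a
multiple of 4, so it passes both checks above and the canary is
loaded from that offset off the guard base register. */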
4447 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4449 const char *str = rs6000_stack_protector_guard_reg_str;
4450 int reg = decode_reg_name (str);
4452 if (!IN_RANGE (reg, 1, 31))
4453 error ("%qs is not a valid base register in %qs", str,
4454 "-mstack-protector-guard-reg=");
4456 rs6000_stack_protector_guard_reg = reg;
4459 if (rs6000_stack_protector_guard == SSP_TLS
4460 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4461 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4463 if (global_init_p)
4465 #ifdef TARGET_REGNAMES
4466 /* If the user desires alternate register names, copy in the
4467 alternate names now. */
4468 if (TARGET_REGNAMES)
4469 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4470 #endif
4472 /* Set aix_struct_return last, after the ABI is determined.
4473 If -maix-struct-return or -msvr4-struct-return was explicitly
4474 used, don't override with the ABI default. */
4475 if (!global_options_set.x_aix_struct_return)
4476 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4478 #if 0
4479 /* IBM XL compiler defaults to unsigned bitfields. */
4480 if (TARGET_XL_COMPAT)
4481 flag_signed_bitfields = 0;
4482 #endif
4484 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4485 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4487 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4489 /* We can only guarantee the availability of DI pseudo-ops when
4490 assembling for 64-bit targets. */
4491 if (!TARGET_64BIT)
4493 targetm.asm_out.aligned_op.di = NULL;
4494 targetm.asm_out.unaligned_op.di = NULL;
4498 /* Set branch target alignment, if not optimizing for size. */
4499 if (!optimize_size)
4501 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4502 aligned 8-byte to avoid misprediction by the branch predictor. */
4503 if (rs6000_tune == PROCESSOR_TITAN
4504 || rs6000_tune == PROCESSOR_CELL)
4506 if (flag_align_functions && !str_align_functions)
4507 str_align_functions = "8";
4508 if (flag_align_jumps && !str_align_jumps)
4509 str_align_jumps = "8";
4510 if (flag_align_loops && !str_align_loops)
4511 str_align_loops = "8";
4513 if (rs6000_align_branch_targets)
4515 if (flag_align_functions && !str_align_functions)
4516 str_align_functions = "16";
4517 if (flag_align_jumps && !str_align_jumps)
4518 str_align_jumps = "16";
4519 if (flag_align_loops && !str_align_loops)
4521 can_override_loop_align = 1;
4522 str_align_loops = "16";
4526 if (flag_align_jumps && !str_align_jumps)
4527 str_align_jumps = "16";
4528 if (flag_align_loops && !str_align_loops)
4529 str_align_loops = "16";
4532 /* Arrange to save and restore machine status around nested functions. */
4533 init_machine_status = rs6000_init_machine_status;
4535 /* We should always be splitting complex arguments, but we can't break
4536 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4537 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4538 targetm.calls.split_complex_arg = NULL;
4540 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4541 if (DEFAULT_ABI == ABI_AIX)
4542 targetm.calls.custom_function_descriptors = 0;
4545 /* Initialize rs6000_cost with the appropriate target costs. */
4546 if (optimize_size)
4547 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4548 else
4549 switch (rs6000_tune)
4551 case PROCESSOR_RS64A:
4552 rs6000_cost = &rs64a_cost;
4553 break;
4555 case PROCESSOR_MPCCORE:
4556 rs6000_cost = &mpccore_cost;
4557 break;
4559 case PROCESSOR_PPC403:
4560 rs6000_cost = &ppc403_cost;
4561 break;
4563 case PROCESSOR_PPC405:
4564 rs6000_cost = &ppc405_cost;
4565 break;
4567 case PROCESSOR_PPC440:
4568 rs6000_cost = &ppc440_cost;
4569 break;
4571 case PROCESSOR_PPC476:
4572 rs6000_cost = &ppc476_cost;
4573 break;
4575 case PROCESSOR_PPC601:
4576 rs6000_cost = &ppc601_cost;
4577 break;
4579 case PROCESSOR_PPC603:
4580 rs6000_cost = &ppc603_cost;
4581 break;
4583 case PROCESSOR_PPC604:
4584 rs6000_cost = &ppc604_cost;
4585 break;
4587 case PROCESSOR_PPC604e:
4588 rs6000_cost = &ppc604e_cost;
4589 break;
4591 case PROCESSOR_PPC620:
4592 rs6000_cost = &ppc620_cost;
4593 break;
4595 case PROCESSOR_PPC630:
4596 rs6000_cost = &ppc630_cost;
4597 break;
4599 case PROCESSOR_CELL:
4600 rs6000_cost = &ppccell_cost;
4601 break;
4603 case PROCESSOR_PPC750:
4604 case PROCESSOR_PPC7400:
4605 rs6000_cost = &ppc750_cost;
4606 break;
4608 case PROCESSOR_PPC7450:
4609 rs6000_cost = &ppc7450_cost;
4610 break;
4612 case PROCESSOR_PPC8540:
4613 case PROCESSOR_PPC8548:
4614 rs6000_cost = &ppc8540_cost;
4615 break;
4617 case PROCESSOR_PPCE300C2:
4618 case PROCESSOR_PPCE300C3:
4619 rs6000_cost = &ppce300c2c3_cost;
4620 break;
4622 case PROCESSOR_PPCE500MC:
4623 rs6000_cost = &ppce500mc_cost;
4624 break;
4626 case PROCESSOR_PPCE500MC64:
4627 rs6000_cost = &ppce500mc64_cost;
4628 break;
4630 case PROCESSOR_PPCE5500:
4631 rs6000_cost = &ppce5500_cost;
4632 break;
4634 case PROCESSOR_PPCE6500:
4635 rs6000_cost = &ppce6500_cost;
4636 break;
4638 case PROCESSOR_TITAN:
4639 rs6000_cost = &titan_cost;
4640 break;
4642 case PROCESSOR_POWER4:
4643 case PROCESSOR_POWER5:
4644 rs6000_cost = &power4_cost;
4645 break;
4647 case PROCESSOR_POWER6:
4648 rs6000_cost = &power6_cost;
4649 break;
4651 case PROCESSOR_POWER7:
4652 rs6000_cost = &power7_cost;
4653 break;
4655 case PROCESSOR_POWER8:
4656 rs6000_cost = &power8_cost;
4657 break;
4659 case PROCESSOR_POWER9:
4660 case PROCESSOR_FUTURE:
4661 rs6000_cost = &power9_cost;
4662 break;
4664 case PROCESSOR_PPCA2:
4665 rs6000_cost = &ppca2_cost;
4666 break;
4668 default:
4669 gcc_unreachable ();
4672 if (global_init_p)
4674 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4675 rs6000_cost->simultaneous_prefetches,
4676 global_options.x_param_values,
4677 global_options_set.x_param_values);
4678 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4679 global_options.x_param_values,
4680 global_options_set.x_param_values);
4681 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4682 rs6000_cost->cache_line_size,
4683 global_options.x_param_values,
4684 global_options_set.x_param_values);
4685 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4686 global_options.x_param_values,
4687 global_options_set.x_param_values);
4689 /* Increase loop peeling limits based on performance analysis. */
4690 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4691 global_options.x_param_values,
4692 global_options_set.x_param_values);
4693 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4694 global_options.x_param_values,
4695 global_options_set.x_param_values);
4697 /* Use the 'model' -fsched-pressure algorithm by default. */
4698 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4699 SCHED_PRESSURE_MODEL,
4700 global_options.x_param_values,
4701 global_options_set.x_param_values);
4703 /* If using typedef char *va_list, signal that
4704 __builtin_va_start (&ap, 0) can be optimized to
4705 ap = __builtin_next_arg (0). */
4706 if (DEFAULT_ABI != ABI_V4)
4707 targetm.expand_builtin_va_start = NULL;
4710 /* If not explicitly specified via option, decide whether to generate indexed
4711 load/store instructions. A value of -1 indicates that the
4712 initial value of this variable has not been overwritten. During
4713 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4714 if (TARGET_AVOID_XFORM == -1)
4715 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4716 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4717 need indexed accesses and the type used is the scalar type of the element
4718 being loaded or stored. */
4719 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4720 && !TARGET_ALTIVEC);
4722 /* Set the -mrecip options. */
4723 if (rs6000_recip_name)
4725 char *p = ASTRDUP (rs6000_recip_name);
4726 char *q;
4727 unsigned int mask, i;
4728 bool invert;
4730 while ((q = strtok (p, ",")) != NULL)
4732 p = NULL;
4733 if (*q == '!')
4735 invert = true;
4736 q++;
4738 else
4739 invert = false;
4741 if (!strcmp (q, "default"))
4742 mask = ((TARGET_RECIP_PRECISION)
4743 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4744 else
4746 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4747 if (!strcmp (q, recip_options[i].string))
4749 mask = recip_options[i].mask;
4750 break;
4753 if (i == ARRAY_SIZE (recip_options))
4755 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4756 invert = false;
4757 mask = 0;
4758 ret = false;
4762 if (invert)
4763 rs6000_recip_control &= ~mask;
4764 else
4765 rs6000_recip_control |= mask;
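/* Illustration of the syntax parsed above (option names assumed from
the -mrecip documentation): -mrecip=all,!rsqrtd would first set
every estimate bit via the "all" entry, then the "!" prefix clears
the bits for the double-precision reciprocal square root estimate. */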
4769 /* Set the builtin mask of the various options used that could affect which
4770 builtins were used. In the past we used target_flags, but we've run out
4771 of bits, and some options are no longer in target_flags. */
4772 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4773 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4774 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4775 rs6000_builtin_mask);
4777 /* Initialize all of the registers. */
4778 rs6000_init_hard_regno_mode_ok (global_init_p);
4780 /* Save the initial options in case the user uses function-specific options. */
4781 if (global_init_p)
4782 target_option_default_node = target_option_current_node
4783 = build_target_option_node (&global_options);
4785 /* If not explicitly specified via option, decide whether to generate the
4786 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4787 if (TARGET_LINK_STACK == -1)
4788 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4790 /* Deprecate use of -mno-speculate-indirect-jumps. */
4791 if (!rs6000_speculate_indirect_jumps)
4792 warning (0, "%qs is deprecated and not recommended in any circumstances",
4793 "-mno-speculate-indirect-jumps");
4795 return ret;
4798 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4799 define the target cpu type. */
4801 static void
4802 rs6000_option_override (void)
4804 (void) rs6000_option_override_internal (true);
4808 /* Implement targetm.vectorize.builtin_mask_for_load. */
4809 static tree
4810 rs6000_builtin_mask_for_load (void)
4812 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4813 if ((TARGET_ALTIVEC && !TARGET_VSX)
4814 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4815 return altivec_builtin_mask_for_load;
4816 else
4817 return 0;
4820 /* Implement LOOP_ALIGN. */
4821 align_flags
4822 rs6000_loop_align (rtx label)
4824 basic_block bb;
4825 int ninsns;
4827 /* Don't override loop alignment if -falign-loops was specified. */
4828 if (!can_override_loop_align)
4829 return align_loops;
4831 bb = BLOCK_FOR_INSN (label);
4832 ninsns = num_loop_insns (bb->loop_father);
4834 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4835 if (ninsns > 4 && ninsns <= 8
4836 && (rs6000_tune == PROCESSOR_POWER4
4837 || rs6000_tune == PROCESSOR_POWER5
4838 || rs6000_tune == PROCESSOR_POWER6
4839 || rs6000_tune == PROCESSOR_POWER7
4840 || rs6000_tune == PROCESSOR_POWER8))
4841 return align_flags (5);
4842 else
4843 return align_loops;
4846 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4847 after applying N iterations. This routine does not determine
4848 how many iterations are required to reach the desired alignment. */
4850 static bool
4851 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4853 if (is_packed)
4854 return false;
4856 if (TARGET_32BIT)
4858 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4859 return true;
4861 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4862 return true;
4864 return false;
4866 else
4868 if (TARGET_MACHO)
4869 return false;
4871 /* Assume that all other types are naturally aligned. CHECKME! */
4872 return true;
4876 /* Return true if the vector misalignment factor is supported by the
4877 target. */
4878 static bool
4879 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4880 const_tree type,
4881 int misalignment,
4882 bool is_packed)
4884 if (TARGET_VSX)
4886 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4887 return true;
4889 /* Return false if the movmisalign pattern is not supported for this mode. */
4890 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4891 return false;
4893 if (misalignment == -1)
4895 /* Misalignment factor is unknown at compile time but we know
4896 it's word aligned. */
4897 if (rs6000_vector_alignment_reachable (type, is_packed))
4899 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4901 if (element_size == 64 || element_size == 32)
4902 return true;
4905 return false;
4908 /* VSX supports word-aligned vectors. */
4909 if (misalignment % 4 == 0)
4910 return true;
4912 return false;
4915 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4916 static int
4917 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4918 tree vectype, int misalign)
4920 unsigned elements;
4921 tree elem_type;
4923 switch (type_of_cost)
4925 case scalar_stmt:
4926 case scalar_load:
4927 case scalar_store:
4928 case vector_stmt:
4929 case vector_load:
4930 case vector_store:
4931 case vec_to_scalar:
4932 case scalar_to_vec:
4933 case cond_branch_not_taken:
4934 return 1;
4936 case vec_perm:
4937 if (TARGET_VSX)
4938 return 3;
4939 else
4940 return 1;
4942 case vec_promote_demote:
4943 if (TARGET_VSX)
4944 return 4;
4945 else
4946 return 1;
4948 case cond_branch_taken:
4949 return 3;
4951 case unaligned_load:
4952 case vector_gather_load:
4953 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4954 return 1;
4956 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4958 elements = TYPE_VECTOR_SUBPARTS (vectype);
4959 if (elements == 2)
4960 /* Double word aligned. */
4961 return 2;
4963 if (elements == 4)
4965 switch (misalign)
4967 case 8:
4968 /* Double word aligned. */
4969 return 2;
4971 case -1:
4972 /* Unknown misalignment. */
4973 case 4:
4974 case 12:
4975 /* Word aligned. */
4976 return 22;
4978 default:
4979 gcc_unreachable ();
4984 if (TARGET_ALTIVEC)
4985 /* Misaligned loads are not supported. */
4986 gcc_unreachable ();
4988 return 2;
4990 case unaligned_store:
4991 case vector_scatter_store:
4992 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4993 return 1;
4995 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4997 elements = TYPE_VECTOR_SUBPARTS (vectype);
4998 if (elements == 2)
4999 /* Double word aligned. */
5000 return 2;
5002 if (elements == 4)
5004 switch (misalign)
5006 case 8:
5007 /* Double word aligned. */
5008 return 2;
5010 case -1:
5011 /* Unknown misalignment. */
5012 case 4:
5013 case 12:
5014 /* Word aligned. */
5015 return 23;
5017 default:
5018 gcc_unreachable ();
5023 if (TARGET_ALTIVEC)
5024 /* Misaligned stores are not supported. */
5025 gcc_unreachable ();
5027 return 2;
5029 case vec_construct:
5030 /* This is a rough approximation assuming non-constant elements
5031 constructed into a vector via element insertion. FIXME:
5032 vec_construct is not granular enough for uniformly good
5033 decisions. If the initialization is a splat, this is
5034 cheaper than we estimate. Improve this someday. */
5035 elem_type = TREE_TYPE (vectype);
5036 /* 32-bit vectors loaded into registers are stored as double
5037 precision, so we need 2 permutes, 2 converts, and 1 merge
5038 to construct a vector of short floats from them. */
5039 if (SCALAR_FLOAT_TYPE_P (elem_type)
5040 && TYPE_PRECISION (elem_type) == 32)
5041 return 5;
5042 /* On POWER9, integer vector types are built up in GPRs and then
5043 use a direct move (2 cycles). For POWER8 this is even worse,
5044 as we need two direct moves and a merge, and the direct moves
5045 are five cycles. */
5046 else if (INTEGRAL_TYPE_P (elem_type))
5048 if (TARGET_P9_VECTOR)
5049 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5050 else
5051 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5053 else
5054 /* V2DFmode doesn't need a direct move. */
5055 return 2;
5057 default:
5058 gcc_unreachable ();
5062 /* Implement targetm.vectorize.preferred_simd_mode. */
5064 static machine_mode
5065 rs6000_preferred_simd_mode (scalar_mode mode)
5067 if (TARGET_VSX)
5068 switch (mode)
5070 case E_DFmode:
5071 return V2DFmode;
5072 default:;
5074 if (TARGET_ALTIVEC || TARGET_VSX)
5075 switch (mode)
5077 case E_SFmode:
5078 return V4SFmode;
5079 case E_TImode:
5080 return V1TImode;
5081 case E_DImode:
5082 return V2DImode;
5083 case E_SImode:
5084 return V4SImode;
5085 case E_HImode:
5086 return V8HImode;
5087 case E_QImode:
5088 return V16QImode;
5089 default:;
5091 return word_mode;
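/* Illustration (restating the switch above): with -mvsx a loop over
"double" elements is vectorized in V2DFmode (two doubles per 128-bit
register) and "float" in V4SFmode; without AltiVec/VSX the scalar
word_mode result tells the vectorizer there is no preferred vector
mode. */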
5094 typedef struct _rs6000_cost_data
5096 struct loop *loop_info;
5097 unsigned cost[3];
5098 } rs6000_cost_data;
5100 /* Test for likely overcommitment of vector hardware resources. If a
5101 loop iteration is relatively large, and too large a percentage of
5102 instructions in the loop are vectorized, the cost model may not
5103 adequately reflect delays from unavailable vector resources.
5104 Penalize the loop body cost for this case. */
5106 static void
5107 rs6000_density_test (rs6000_cost_data *data)
5109 const int DENSITY_PCT_THRESHOLD = 85;
5110 const int DENSITY_SIZE_THRESHOLD = 70;
5111 const int DENSITY_PENALTY = 10;
5112 struct loop *loop = data->loop_info;
5113 basic_block *bbs = get_loop_body (loop);
5114 int nbbs = loop->num_nodes;
5115 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5116 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5117 int i, density_pct;
5119 for (i = 0; i < nbbs; i++)
5121 basic_block bb = bbs[i];
5122 gimple_stmt_iterator gsi;
5124 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5126 gimple *stmt = gsi_stmt (gsi);
5127 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5129 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5130 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5131 not_vec_cost++;
5135 free (bbs);
5136 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5138 if (density_pct > DENSITY_PCT_THRESHOLD
5139 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5141 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5142 if (dump_enabled_p ())
5143 dump_printf_loc (MSG_NOTE, vect_location,
5144 "density %d%%, cost %d exceeds threshold, penalizing "
5145 "loop body cost by %d%%", density_pct,
5146 vec_cost + not_vec_cost, DENSITY_PENALTY);
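/* Worked example with illustrative numbers: vec_cost == 90 and
not_vec_cost == 10 give density_pct == 90 and a loop size of 100;
both thresholds (85%, 70) are exceeded, so the body cost becomes
90 * (100 + 10) / 100 == 99. */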
5150 /* Implement targetm.vectorize.init_cost. */
5152 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5153 instruction is needed by the vectorization. */
5154 static bool rs6000_vect_nonmem;
5156 static void *
5157 rs6000_init_cost (struct loop *loop_info)
5159 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5160 data->loop_info = loop_info;
5161 data->cost[vect_prologue] = 0;
5162 data->cost[vect_body] = 0;
5163 data->cost[vect_epilogue] = 0;
5164 rs6000_vect_nonmem = false;
5165 return data;
5168 /* Implement targetm.vectorize.add_stmt_cost. */
5170 static unsigned
5171 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5172 struct _stmt_vec_info *stmt_info, int misalign,
5173 enum vect_cost_model_location where)
5175 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5176 unsigned retval = 0;
5178 if (flag_vect_cost_model)
5180 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5181 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5182 misalign);
5183 /* Statements in an inner loop relative to the loop being
5184 vectorized are weighted more heavily. The value here is
5185 arbitrary and could potentially be improved with analysis. */
5186 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5187 count *= 50; /* FIXME. */
5189 retval = (unsigned) (count * stmt_cost);
5190 cost_data->cost[where] += retval;
5192 /* Check whether we're doing something other than just a copy loop.
5193 Not all such loops may be profitably vectorized; see
5194 rs6000_finish_cost. */
5195 if ((kind == vec_to_scalar || kind == vec_perm
5196 || kind == vec_promote_demote || kind == vec_construct
5197 || kind == scalar_to_vec)
5198 || (where == vect_body && kind == vector_stmt))
5199 rs6000_vect_nonmem = true;
5202 return retval;
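/* Example (illustrative, not part of the original source): a
   vector_stmt with stmt_cost 1 and count 1 inside an inner loop is
   charged as 1 * 50 = 50 units against vect_body because of the
   weighting above, and, being a vector_stmt in the loop body, it also
   flips rs6000_vect_nonmem to true.  */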
5205 /* Implement targetm.vectorize.finish_cost. */
5207 static void
5208 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5209 unsigned *body_cost, unsigned *epilogue_cost)
5211 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5213 if (cost_data->loop_info)
5214 rs6000_density_test (cost_data);
5216 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5217 that require versioning for any reason. The vectorization is at
5218 best a wash inside the loop, and the versioning checks make
5219 profitability highly unlikely and potentially quite harmful. */
5220 if (cost_data->loop_info)
5222 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5223 if (!rs6000_vect_nonmem
5224 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5225 && LOOP_REQUIRES_VERSIONING (vec_info))
5226 cost_data->cost[vect_body] += 10000;
5229 *prologue_cost = cost_data->cost[vect_prologue];
5230 *body_cost = cost_data->cost[vect_body];
5231 *epilogue_cost = cost_data->cost[vect_epilogue];
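/* Illustrative scenario (not part of the original source): a simple
   copy loop with vectorization factor 2 that requires versioning
   (e.g. for alias checks) and used no non-memory vector statement gets
   vect_body inflated by 10000, which effectively vetoes its
   vectorization.  */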
5234 /* Implement targetm.vectorize.destroy_cost_data. */
5236 static void
5237 rs6000_destroy_cost_data (void *data)
5239 free (data);
5242 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5243 library with vectorized intrinsics. */
5245 static tree
5246 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5247 tree type_in)
5249 char name[32];
5250 const char *suffix = NULL;
5251 tree fntype, new_fndecl, bdecl = NULL_TREE;
5252 int n_args = 1;
5253 const char *bname;
5254 machine_mode el_mode, in_mode;
5255 int n, in_n;
5257 /* Libmass is suitable for unsafe math only as it does not correctly support
5258 parts of IEEE with the required precision such as denormals. Only support
5259 it if we have VSX to use the simd d2 or f4 functions.
5260 XXX: Add variable length support. */
5261 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5262 return NULL_TREE;
5264 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5265 n = TYPE_VECTOR_SUBPARTS (type_out);
5266 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5267 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5268 if (el_mode != in_mode
5269 || n != in_n)
5270 return NULL_TREE;
5272 switch (fn)
5274 CASE_CFN_ATAN2:
5275 CASE_CFN_HYPOT:
5276 CASE_CFN_POW:
5277 n_args = 2;
5278 gcc_fallthrough ();
5280 CASE_CFN_ACOS:
5281 CASE_CFN_ACOSH:
5282 CASE_CFN_ASIN:
5283 CASE_CFN_ASINH:
5284 CASE_CFN_ATAN:
5285 CASE_CFN_ATANH:
5286 CASE_CFN_CBRT:
5287 CASE_CFN_COS:
5288 CASE_CFN_COSH:
5289 CASE_CFN_ERF:
5290 CASE_CFN_ERFC:
5291 CASE_CFN_EXP2:
5292 CASE_CFN_EXP:
5293 CASE_CFN_EXPM1:
5294 CASE_CFN_LGAMMA:
5295 CASE_CFN_LOG10:
5296 CASE_CFN_LOG1P:
5297 CASE_CFN_LOG2:
5298 CASE_CFN_LOG:
5299 CASE_CFN_SIN:
5300 CASE_CFN_SINH:
5301 CASE_CFN_SQRT:
5302 CASE_CFN_TAN:
5303 CASE_CFN_TANH:
5304 if (el_mode == DFmode && n == 2)
5306 bdecl = mathfn_built_in (double_type_node, fn);
5307 suffix = "d2"; /* pow -> powd2 */
5309 else if (el_mode == SFmode && n == 4)
5311 bdecl = mathfn_built_in (float_type_node, fn);
5312 suffix = "4"; /* powf -> powf4 */
5314 else
5315 return NULL_TREE;
5316 if (!bdecl)
5317 return NULL_TREE;
5318 break;
5320 default:
5321 return NULL_TREE;
5324 gcc_assert (suffix != NULL);
5325 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5326 if (!bname)
5327 return NULL_TREE;
5329 strcpy (name, bname + sizeof ("__builtin_") - 1);
5330 strcat (name, suffix);
5332 if (n_args == 1)
5333 fntype = build_function_type_list (type_out, type_in, NULL);
5334 else if (n_args == 2)
5335 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5336 else
5337 gcc_unreachable ();
5339 /* Build a function declaration for the vectorized function. */
5340 new_fndecl = build_decl (BUILTINS_LOCATION,
5341 FUNCTION_DECL, get_identifier (name), fntype);
5342 TREE_PUBLIC (new_fndecl) = 1;
5343 DECL_EXTERNAL (new_fndecl) = 1;
5344 DECL_IS_NOVOPS (new_fndecl) = 1;
5345 TREE_READONLY (new_fndecl) = 1;
5347 return new_fndecl;
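/* Naming example (illustrative, not part of the original source): for
   CFN_POW with V2DFmode in and out, bdecl is __builtin_pow, so
   bname + sizeof ("__builtin_") - 1 points at "pow" and appending the
   "d2" suffix yields the MASS routine name "powd2"; the V4SFmode case
   maps powf to "powf4".  */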
5350 /* Returns a function decl for a vectorized version of the builtin function
5351 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5352 if it is not available. */
5354 static tree
5355 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5356 tree type_in)
5358 machine_mode in_mode, out_mode;
5359 int in_n, out_n;
5361 if (TARGET_DEBUG_BUILTIN)
5362 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5363 combined_fn_name (combined_fn (fn)),
5364 GET_MODE_NAME (TYPE_MODE (type_out)),
5365 GET_MODE_NAME (TYPE_MODE (type_in)));
5367 if (TREE_CODE (type_out) != VECTOR_TYPE
5368 || TREE_CODE (type_in) != VECTOR_TYPE)
5369 return NULL_TREE;
5371 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5372 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5373 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5374 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5376 switch (fn)
5378 CASE_CFN_COPYSIGN:
5379 if (VECTOR_UNIT_VSX_P (V2DFmode)
5380 && out_mode == DFmode && out_n == 2
5381 && in_mode == DFmode && in_n == 2)
5382 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5383 if (VECTOR_UNIT_VSX_P (V4SFmode)
5384 && out_mode == SFmode && out_n == 4
5385 && in_mode == SFmode && in_n == 4)
5386 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5387 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5388 && out_mode == SFmode && out_n == 4
5389 && in_mode == SFmode && in_n == 4)
5390 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5391 break;
5392 CASE_CFN_CEIL:
5393 if (VECTOR_UNIT_VSX_P (V2DFmode)
5394 && out_mode == DFmode && out_n == 2
5395 && in_mode == DFmode && in_n == 2)
5396 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5397 if (VECTOR_UNIT_VSX_P (V4SFmode)
5398 && out_mode == SFmode && out_n == 4
5399 && in_mode == SFmode && in_n == 4)
5400 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5401 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5402 && out_mode == SFmode && out_n == 4
5403 && in_mode == SFmode && in_n == 4)
5404 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5405 break;
5406 CASE_CFN_FLOOR:
5407 if (VECTOR_UNIT_VSX_P (V2DFmode)
5408 && out_mode == DFmode && out_n == 2
5409 && in_mode == DFmode && in_n == 2)
5410 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5411 if (VECTOR_UNIT_VSX_P (V4SFmode)
5412 && out_mode == SFmode && out_n == 4
5413 && in_mode == SFmode && in_n == 4)
5414 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5415 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5416 && out_mode == SFmode && out_n == 4
5417 && in_mode == SFmode && in_n == 4)
5418 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5419 break;
5420 CASE_CFN_FMA:
5421 if (VECTOR_UNIT_VSX_P (V2DFmode)
5422 && out_mode == DFmode && out_n == 2
5423 && in_mode == DFmode && in_n == 2)
5424 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5425 if (VECTOR_UNIT_VSX_P (V4SFmode)
5426 && out_mode == SFmode && out_n == 4
5427 && in_mode == SFmode && in_n == 4)
5428 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5429 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5430 && out_mode == SFmode && out_n == 4
5431 && in_mode == SFmode && in_n == 4)
5432 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5433 break;
5434 CASE_CFN_TRUNC:
5435 if (VECTOR_UNIT_VSX_P (V2DFmode)
5436 && out_mode == DFmode && out_n == 2
5437 && in_mode == DFmode && in_n == 2)
5438 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5439 if (VECTOR_UNIT_VSX_P (V4SFmode)
5440 && out_mode == SFmode && out_n == 4
5441 && in_mode == SFmode && in_n == 4)
5442 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5443 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5444 && out_mode == SFmode && out_n == 4
5445 && in_mode == SFmode && in_n == 4)
5446 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5447 break;
5448 CASE_CFN_NEARBYINT:
5449 if (VECTOR_UNIT_VSX_P (V2DFmode)
5450 && flag_unsafe_math_optimizations
5451 && out_mode == DFmode && out_n == 2
5452 && in_mode == DFmode && in_n == 2)
5453 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5454 if (VECTOR_UNIT_VSX_P (V4SFmode)
5455 && flag_unsafe_math_optimizations
5456 && out_mode == SFmode && out_n == 4
5457 && in_mode == SFmode && in_n == 4)
5458 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5459 break;
5460 CASE_CFN_RINT:
5461 if (VECTOR_UNIT_VSX_P (V2DFmode)
5462 && !flag_trapping_math
5463 && out_mode == DFmode && out_n == 2
5464 && in_mode == DFmode && in_n == 2)
5465 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5466 if (VECTOR_UNIT_VSX_P (V4SFmode)
5467 && !flag_trapping_math
5468 && out_mode == SFmode && out_n == 4
5469 && in_mode == SFmode && in_n == 4)
5470 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5471 break;
5472 default:
5473 break;
5476 /* Generate calls to libmass if appropriate. */
5477 if (rs6000_veclib_handler)
5478 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5480 return NULL_TREE;
5483 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5485 static tree
5486 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5487 tree type_in)
5489 machine_mode in_mode, out_mode;
5490 int in_n, out_n;
5492 if (TARGET_DEBUG_BUILTIN)
5493 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5494 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5495 GET_MODE_NAME (TYPE_MODE (type_out)),
5496 GET_MODE_NAME (TYPE_MODE (type_in)));
5498 if (TREE_CODE (type_out) != VECTOR_TYPE
5499 || TREE_CODE (type_in) != VECTOR_TYPE)
5500 return NULL_TREE;
5502 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5503 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5504 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5505 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5507 enum rs6000_builtins fn
5508 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5509 switch (fn)
5511 case RS6000_BUILTIN_RSQRTF:
5512 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5513 && out_mode == SFmode && out_n == 4
5514 && in_mode == SFmode && in_n == 4)
5515 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5516 break;
5517 case RS6000_BUILTIN_RSQRT:
5518 if (VECTOR_UNIT_VSX_P (V2DFmode)
5519 && out_mode == DFmode && out_n == 2
5520 && in_mode == DFmode && in_n == 2)
5521 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5522 break;
5523 case RS6000_BUILTIN_RECIPF:
5524 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5525 && out_mode == SFmode && out_n == 4
5526 && in_mode == SFmode && in_n == 4)
5527 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5528 break;
5529 case RS6000_BUILTIN_RECIP:
5530 if (VECTOR_UNIT_VSX_P (V2DFmode)
5531 && out_mode == DFmode && out_n == 2
5532 && in_mode == DFmode && in_n == 2)
5533 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5534 break;
5535 default:
5536 break;
5538 return NULL_TREE;
5541 /* Default CPU string for rs6000*_file_start functions. */
5542 static const char *rs6000_default_cpu;
5544 #ifdef USING_ELFOS_H
5545 const char *rs6000_machine;
5547 const char *
5548 rs6000_machine_from_flags (void)
5550 HOST_WIDE_INT flags = rs6000_isa_flags;
5552 /* Disable the flags that should never influence the .machine selection. */
5553 flags &= ~(OPTION_MASK_PPC_GFXOPT | OPTION_MASK_PPC_GPOPT);
5555 if ((flags & (ISA_FUTURE_MASKS_SERVER & ~ISA_3_0_MASKS_SERVER)) != 0)
5556 return "future";
5557 if ((flags & (ISA_3_0_MASKS_SERVER & ~ISA_2_7_MASKS_SERVER)) != 0)
5558 return "power9";
5559 if ((flags & (ISA_2_7_MASKS_SERVER & ~ISA_2_6_MASKS_SERVER)) != 0)
5560 return "power8";
5561 if ((flags & (ISA_2_6_MASKS_SERVER & ~ISA_2_5_MASKS_SERVER)) != 0)
5562 return "power7";
5563 if ((flags & (ISA_2_5_MASKS_SERVER & ~ISA_2_4_MASKS)) != 0)
5564 return "power6";
5565 if ((flags & (ISA_2_4_MASKS & ~ISA_2_1_MASKS)) != 0)
5566 return "power5";
5567 if ((flags & ISA_2_1_MASKS) != 0)
5568 return "power4";
5569 if ((flags & OPTION_MASK_POWERPC64) != 0)
5570 return "ppc64";
5571 return "ppc";
5574 void
5575 emit_asm_machine (void)
5577 fprintf (asm_out_file, "\t.machine %s\n", rs6000_machine);
5579 #endif
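/* Example (illustrative, not part of the original source): compiling
   with -mcpu=power8 sets flags in ISA_2_7_MASKS_SERVER beyond
   ISA_2_6_MASKS_SERVER, so rs6000_machine_from_flags returns "power8"
   and emit_asm_machine writes "\t.machine power8" into the asm file.  */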
5581 /* Do anything needed at the start of the asm file. */
5583 static void
5584 rs6000_file_start (void)
5586 char buffer[80];
5587 const char *start = buffer;
5588 FILE *file = asm_out_file;
5590 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5592 default_file_start ();
5594 if (flag_verbose_asm)
5596 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5598 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5600 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5601 start = "";
5604 if (global_options_set.x_rs6000_cpu_index)
5606 fprintf (file, "%s -mcpu=%s", start,
5607 processor_target_table[rs6000_cpu_index].name);
5608 start = "";
5611 if (global_options_set.x_rs6000_tune_index)
5613 fprintf (file, "%s -mtune=%s", start,
5614 processor_target_table[rs6000_tune_index].name);
5615 start = "";
5618 if (PPC405_ERRATUM77)
5620 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5621 start = "";
5624 #ifdef USING_ELFOS_H
5625 switch (rs6000_sdata)
5627 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5628 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5629 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5630 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5633 if (rs6000_sdata && g_switch_value)
5635 fprintf (file, "%s -G %d", start,
5636 g_switch_value);
5637 start = "";
5639 #endif
5641 if (*start == '\0')
5642 putc ('\n', file);
5645 #ifdef USING_ELFOS_H
5646 rs6000_machine = rs6000_machine_from_flags ();
5647 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5648 && !global_options_set.x_rs6000_cpu_index)
5649 emit_asm_machine ();
5650 #endif
5652 if (DEFAULT_ABI == ABI_ELFv2)
5653 fprintf (file, "\t.abiversion 2\n");
5657 /* Return nonzero if this function is known to have a null epilogue. */
5659 int
5660 direct_return (void)
5662 if (reload_completed)
5664 rs6000_stack_t *info = rs6000_stack_info ();
5666 if (info->first_gp_reg_save == 32
5667 && info->first_fp_reg_save == 64
5668 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5669 && ! info->lr_save_p
5670 && ! info->cr_save_p
5671 && info->vrsave_size == 0
5672 && ! info->push_p)
5673 return 1;
5676 return 0;
5679 /* Helper for num_insns_constant. Calculate number of instructions to
5680 load VALUE to a single gpr using combinations of addi, addis, ori,
5681 oris and sldi instructions. */
5683 static int
5684 num_insns_constant_gpr (HOST_WIDE_INT value)
5686 /* signed constant loadable with addi */
5687 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5688 return 1;
5690 /* constant loadable with addis */
5691 else if ((value & 0xffff) == 0
5692 && (value >> 31 == -1 || value >> 31 == 0))
5693 return 1;
5695 else if (TARGET_POWERPC64)
5697 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5698 HOST_WIDE_INT high = value >> 31;
5700 if (high == 0 || high == -1)
5701 return 2;
5703 high >>= 1;
5705 if (low == 0)
5706 return num_insns_constant_gpr (high) + 1;
5707 else if (high == 0)
5708 return num_insns_constant_gpr (low) + 1;
5709 else
5710 return (num_insns_constant_gpr (high)
5711 + num_insns_constant_gpr (low) + 1);
5714 else
5715 return 2;
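/* Worked examples (illustrative, not part of the original source):
     0x7fff       -> 1 insn  (addi: fits a signed 16-bit immediate)
     0x12340000   -> 1 insn  (addis: low 16 bits zero, fits 32 bits)
     0x123456789  -> 4 insns (li 1; sldi 32; oris 0x2345; ori 0x6789),
                     computed as num (high) + num (low) + 1 on
                     TARGET_POWERPC64.  */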
5718 /* Helper for num_insns_constant. Allow constants formed by the
5719 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5720 and handle modes that require multiple gprs. */
5722 static int
5723 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5725 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5726 int total = 0;
5727 while (nregs-- > 0)
5729 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5730 int insns = num_insns_constant_gpr (low);
5731 if (insns > 2
5732 /* We won't get more than 2 from num_insns_constant_gpr
5733 except when TARGET_POWERPC64 and mode is DImode or
5734 wider, so the register mode must be DImode. */
5735 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5736 insns = 2;
5737 total += insns;
5738 value >>= BITS_PER_WORD;
5740 return total;
5743 /* Return the number of instructions it takes to form a constant in as
5744 many gprs as are needed for MODE. */
5746 int
5747 num_insns_constant (rtx op, machine_mode mode)
5749 HOST_WIDE_INT val;
5751 switch (GET_CODE (op))
5753 case CONST_INT:
5754 val = INTVAL (op);
5755 break;
5757 case CONST_WIDE_INT:
5759 int insns = 0;
5760 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5761 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5762 DImode);
5763 return insns;
5766 case CONST_DOUBLE:
5768 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5770 if (mode == SFmode || mode == SDmode)
5772 long l;
5774 if (mode == SDmode)
5775 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5776 else
5777 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5778 /* See the first define_split in rs6000.md handling a
5779 const_double_operand. */
5780 val = l;
5781 mode = SImode;
5783 else if (mode == DFmode || mode == DDmode)
5785 long l[2];
5787 if (mode == DDmode)
5788 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5789 else
5790 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5792 /* See the second (32-bit) and third (64-bit) define_split
5793 in rs6000.md handling a const_double_operand. */
5794 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5795 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5796 mode = DImode;
5798 else if (mode == TFmode || mode == TDmode
5799 || mode == KFmode || mode == IFmode)
5801 long l[4];
5802 int insns;
5804 if (mode == TDmode)
5805 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5806 else
5807 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5809 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5810 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5811 insns = num_insns_constant_multi (val, DImode);
5812 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5813 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5814 insns += num_insns_constant_multi (val, DImode);
5815 return insns;
5817 else
5818 gcc_unreachable ();
5820 break;
5822 default:
5823 gcc_unreachable ();
5826 return num_insns_constant_multi (val, mode);
5829 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5830 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5831 corresponding element of the vector, but for V4SFmode, the
5832 corresponding "float" is interpreted as an SImode integer. */
5834 HOST_WIDE_INT
5835 const_vector_elt_as_int (rtx op, unsigned int elt)
5837 rtx tmp;
5839 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5840 gcc_assert (GET_MODE (op) != V2DImode
5841 && GET_MODE (op) != V2DFmode);
5843 tmp = CONST_VECTOR_ELT (op, elt);
5844 if (GET_MODE (op) == V4SFmode)
5845 tmp = gen_lowpart (SImode, tmp);
5846 return INTVAL (tmp);
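/* Example (illustrative, not part of the original source): for a
   V4SFmode constant with all elements 1.0f, each element is
   reinterpreted via gen_lowpart as the SImode bit pattern 0x3f800000
   rather than being converted numerically.  */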
5849 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5850 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5851 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5852 all items are set to the same value and contain COPIES replicas of the
5853 vsplt's operand; if STEP > 1, every STEP-th element is set to the vsplt's
5854 operand and the others are set to the value of the operand's msb. */
5856 static bool
5857 vspltis_constant (rtx op, unsigned step, unsigned copies)
5859 machine_mode mode = GET_MODE (op);
5860 machine_mode inner = GET_MODE_INNER (mode);
5862 unsigned i;
5863 unsigned nunits;
5864 unsigned bitsize;
5865 unsigned mask;
5867 HOST_WIDE_INT val;
5868 HOST_WIDE_INT splat_val;
5869 HOST_WIDE_INT msb_val;
5871 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5872 return false;
5874 nunits = GET_MODE_NUNITS (mode);
5875 bitsize = GET_MODE_BITSIZE (inner);
5876 mask = GET_MODE_MASK (inner);
5878 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5879 splat_val = val;
5880 msb_val = val >= 0 ? 0 : -1;
5882 /* Construct the value to be splatted, if possible. If not, return 0. */
5883 for (i = 2; i <= copies; i *= 2)
5885 HOST_WIDE_INT small_val;
5886 bitsize /= 2;
5887 small_val = splat_val >> bitsize;
5888 mask >>= bitsize;
5889 if (splat_val != ((HOST_WIDE_INT)
5890 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5891 | (small_val & mask)))
5892 return false;
5893 splat_val = small_val;
5896 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5897 if (EASY_VECTOR_15 (splat_val))
5900 /* Also check if we can splat, and then add the result to itself. Do so if
5901 the value is positive, or if the splat instruction is using OP's mode;
5902 for splat_val < 0, the splat and the add should use the same mode. */
5903 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5904 && (splat_val >= 0 || (step == 1 && copies == 1)))
5907 /* Also check if we are loading up the most significant bit which can be done by
5908 loading up -1 and shifting the value left by -1. */
5909 else if (EASY_VECTOR_MSB (splat_val, inner))
5912 else
5913 return false;
5915 /* Check if VAL is present in every STEP-th element, and the
5916 other elements are filled with its most significant bit. */
5917 for (i = 1; i < nunits; ++i)
5919 HOST_WIDE_INT desired_val;
5920 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5921 if ((i & (step - 1)) == 0)
5922 desired_val = val;
5923 else
5924 desired_val = msb_val;
5926 if (desired_val != const_vector_elt_as_int (op, elt))
5927 return false;
5930 return true;
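/* Example (illustrative, not part of the original source): the V8HImode
   constant with every halfword 0x0101 is accepted for step = 1,
   copies = 2: halving the element once gives splat_val = 1, which
   passes EASY_VECTOR_15, so the whole constant is a single
   vspltisb 1.  */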
5933 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
5934 instruction, filling in the bottom elements with 0 or -1.
5936 Return 0 if the constant cannot be generated with VSLDOI. Return positive
5937 for the number of zeroes to shift in, or negative for the number of 0xff
5938 bytes to shift in.
5940 OP is a CONST_VECTOR. */
5942 int
5943 vspltis_shifted (rtx op)
5945 machine_mode mode = GET_MODE (op);
5946 machine_mode inner = GET_MODE_INNER (mode);
5948 unsigned i, j;
5949 unsigned nunits;
5950 unsigned mask;
5952 HOST_WIDE_INT val;
5954 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
5955 return false;
5957 /* We need to create pseudo registers to do the shift, so don't recognize
5958 shift vector constants after reload. */
5959 if (!can_create_pseudo_p ())
5960 return false;
5962 nunits = GET_MODE_NUNITS (mode);
5963 mask = GET_MODE_MASK (inner);
5965 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
5967 /* Check if the value can really be the operand of a vspltis[bhw]. */
5968 if (EASY_VECTOR_15 (val))
5971 /* Also check if we are loading up the most significant bit which can be done
5972 by loading up -1 and shifting the value left by -1. */
5973 else if (EASY_VECTOR_MSB (val, inner))
5976 else
5977 return 0;
5979 /* Check if VAL is present in every element until we find elements
5980 that are 0 or all 1 bits. */
5981 for (i = 1; i < nunits; ++i)
5983 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
5984 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
5986 /* If the value isn't the splat value, check for the remaining elements
5987 being 0/-1. */
5988 if (val != elt_val)
5990 if (elt_val == 0)
5992 for (j = i+1; j < nunits; ++j)
5994 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
5995 if (const_vector_elt_as_int (op, elt2) != 0)
5996 return 0;
5999 return (nunits - i) * GET_MODE_SIZE (inner);
6002 else if ((elt_val & mask) == mask)
6004 for (j = i+1; j < nunits; ++j)
6006 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6007 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6008 return 0;
6011 return -((nunits - i) * GET_MODE_SIZE (inner));
6014 else
6015 return 0;
6019 /* If all elements are equal, we don't need to do VSLDOI. */
6020 return 0;
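/* Example (illustrative, not part of the original source): the
   big-endian V4SImode constant { 5, 5, 0, 0 } splits after two
   elements, so the return value is (4 - 2) * 4 = 8, meaning shift in
   8 zero bytes; were the tail all 1 bits instead, it would be -8.  */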
6024 /* Return true if OP is of the given MODE and can be synthesized
6025 with a vspltisb, vspltish or vspltisw. */
6027 bool
6028 easy_altivec_constant (rtx op, machine_mode mode)
6030 unsigned step, copies;
6032 if (mode == VOIDmode)
6033 mode = GET_MODE (op);
6034 else if (mode != GET_MODE (op))
6035 return false;
6037 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6038 constants. */
6039 if (mode == V2DFmode)
6040 return zero_constant (op, mode);
6042 else if (mode == V2DImode)
6044 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6045 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6046 return false;
6048 if (zero_constant (op, mode))
6049 return true;
6051 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6052 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6053 return true;
6055 return false;
6058 /* V1TImode is a special container for TImode. Ignore for now. */
6059 else if (mode == V1TImode)
6060 return false;
6062 /* Start with a vspltisw. */
6063 step = GET_MODE_NUNITS (mode) / 4;
6064 copies = 1;
6066 if (vspltis_constant (op, step, copies))
6067 return true;
6069 /* Then try with a vspltish. */
6070 if (step == 1)
6071 copies <<= 1;
6072 else
6073 step >>= 1;
6075 if (vspltis_constant (op, step, copies))
6076 return true;
6078 /* And finally a vspltisb. */
6079 if (step == 1)
6080 copies <<= 1;
6081 else
6082 step >>= 1;
6084 if (vspltis_constant (op, step, copies))
6085 return true;
6087 if (vspltis_shifted (op) != 0)
6088 return true;
6090 return false;
6093 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6094 result is OP. Abort if it is not possible. */
6096 rtx
6097 gen_easy_altivec_constant (rtx op)
6099 machine_mode mode = GET_MODE (op);
6100 int nunits = GET_MODE_NUNITS (mode);
6101 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6102 unsigned step = nunits / 4;
6103 unsigned copies = 1;
6105 /* Start with a vspltisw. */
6106 if (vspltis_constant (op, step, copies))
6107 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6109 /* Then try with a vspltish. */
6110 if (step == 1)
6111 copies <<= 1;
6112 else
6113 step >>= 1;
6115 if (vspltis_constant (op, step, copies))
6116 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6118 /* And finally a vspltisb. */
6119 if (step == 1)
6120 copies <<= 1;
6121 else
6122 step >>= 1;
6124 if (vspltis_constant (op, step, copies))
6125 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6127 gcc_unreachable ();
6130 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6131 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6133 Return the number of instructions needed (1 or 2) in the location
6134 pointed to by NUM_INSNS_PTR.
6136 Return the constant that is being split via CONSTANT_PTR. */
6138 bool
6139 xxspltib_constant_p (rtx op,
6140 machine_mode mode,
6141 int *num_insns_ptr,
6142 int *constant_ptr)
6144 size_t nunits = GET_MODE_NUNITS (mode);
6145 size_t i;
6146 HOST_WIDE_INT value;
6147 rtx element;
6149 /* Set the returned values to out-of-bound values. */
6150 *num_insns_ptr = -1;
6151 *constant_ptr = 256;
6153 if (!TARGET_P9_VECTOR)
6154 return false;
6156 if (mode == VOIDmode)
6157 mode = GET_MODE (op);
6159 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6160 return false;
6162 /* Handle (vec_duplicate <constant>). */
6163 if (GET_CODE (op) == VEC_DUPLICATE)
6165 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6166 && mode != V2DImode)
6167 return false;
6169 element = XEXP (op, 0);
6170 if (!CONST_INT_P (element))
6171 return false;
6173 value = INTVAL (element);
6174 if (!IN_RANGE (value, -128, 127))
6175 return false;
6178 /* Handle (const_vector [...]). */
6179 else if (GET_CODE (op) == CONST_VECTOR)
6181 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6182 && mode != V2DImode)
6183 return false;
6185 element = CONST_VECTOR_ELT (op, 0);
6186 if (!CONST_INT_P (element))
6187 return false;
6189 value = INTVAL (element);
6190 if (!IN_RANGE (value, -128, 127))
6191 return false;
6193 for (i = 1; i < nunits; i++)
6195 element = CONST_VECTOR_ELT (op, i);
6196 if (!CONST_INT_P (element))
6197 return false;
6199 if (value != INTVAL (element))
6200 return false;
6204 /* Handle integer constants being loaded into the upper part of the VSX
6205 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6206 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6207 else if (CONST_INT_P (op))
6209 if (!SCALAR_INT_MODE_P (mode))
6210 return false;
6212 value = INTVAL (op);
6213 if (!IN_RANGE (value, -128, 127))
6214 return false;
6216 if (!IN_RANGE (value, -1, 0))
6218 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6219 return false;
6221 if (EASY_VECTOR_15 (value))
6222 return false;
6226 else
6227 return false;
6229 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6230 sign extend. Special case 0/-1 to allow getting any VSX register instead
6231 of an Altivec register. */
6232 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6233 && EASY_VECTOR_15 (value))
6234 return false;
6236 /* Return # of instructions and the constant byte for XXSPLTIB. */
6237 if (mode == V16QImode)
6238 *num_insns_ptr = 1;
6240 else if (IN_RANGE (value, -1, 0))
6241 *num_insns_ptr = 1;
6243 else
6244 *num_insns_ptr = 2;
6246 *constant_ptr = (int) value;
6247 return true;
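/* Examples (illustrative, not part of the original source): a V16QImode
   splat of 100 sets *num_insns_ptr = 1 and *constant_ptr = 100 (bare
   xxspltib); a V4SImode splat of 100 sets *num_insns_ptr = 2 (xxspltib
   plus a vextsb2w sign extension); a V4SImode splat of 5 returns false
   because the EASY_VECTOR_15 test above prefers vspltisw.  */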
6250 const char *
6251 output_vec_const_move (rtx *operands)
6253 int shift;
6254 machine_mode mode;
6255 rtx dest, vec;
6257 dest = operands[0];
6258 vec = operands[1];
6259 mode = GET_MODE (dest);
6261 if (TARGET_VSX)
6263 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6264 int xxspltib_value = 256;
6265 int num_insns = -1;
6267 if (zero_constant (vec, mode))
6269 if (TARGET_P9_VECTOR)
6270 return "xxspltib %x0,0";
6272 else if (dest_vmx_p)
6273 return "vspltisw %0,0";
6275 else
6276 return "xxlxor %x0,%x0,%x0";
6279 if (all_ones_constant (vec, mode))
6281 if (TARGET_P9_VECTOR)
6282 return "xxspltib %x0,255";
6284 else if (dest_vmx_p)
6285 return "vspltisw %0,-1";
6287 else if (TARGET_P8_VECTOR)
6288 return "xxlorc %x0,%x0,%x0";
6290 else
6291 gcc_unreachable ();
6294 if (TARGET_P9_VECTOR
6295 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6297 if (num_insns == 1)
6299 operands[2] = GEN_INT (xxspltib_value & 0xff);
6300 return "xxspltib %x0,%2";
6303 return "#";
6307 if (TARGET_ALTIVEC)
6309 rtx splat_vec;
6311 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6312 if (zero_constant (vec, mode))
6313 return "vspltisw %0,0";
6315 if (all_ones_constant (vec, mode))
6316 return "vspltisw %0,-1";
6318 /* Do we need to construct a value using VSLDOI? */
6319 shift = vspltis_shifted (vec);
6320 if (shift != 0)
6321 return "#";
6323 splat_vec = gen_easy_altivec_constant (vec);
6324 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6325 operands[1] = XEXP (splat_vec, 0);
6326 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6327 return "#";
6329 switch (GET_MODE (splat_vec))
6331 case E_V4SImode:
6332 return "vspltisw %0,%1";
6334 case E_V8HImode:
6335 return "vspltish %0,%1";
6337 case E_V16QImode:
6338 return "vspltisb %0,%1";
6340 default:
6341 gcc_unreachable ();
6345 gcc_unreachable ();
6348 /* Initialize vector TARGET to VALS. */
6350 void
6351 rs6000_expand_vector_init (rtx target, rtx vals)
6353 machine_mode mode = GET_MODE (target);
6354 machine_mode inner_mode = GET_MODE_INNER (mode);
6355 int n_elts = GET_MODE_NUNITS (mode);
6356 int n_var = 0, one_var = -1;
6357 bool all_same = true, all_const_zero = true;
6358 rtx x, mem;
6359 int i;
6361 for (i = 0; i < n_elts; ++i)
6363 x = XVECEXP (vals, 0, i);
6364 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6365 ++n_var, one_var = i;
6366 else if (x != CONST0_RTX (inner_mode))
6367 all_const_zero = false;
6369 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6370 all_same = false;
6373 if (n_var == 0)
6375 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6376 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6377 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6379 /* Zero register. */
6380 emit_move_insn (target, CONST0_RTX (mode));
6381 return;
6383 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6385 /* Splat immediate. */
6386 emit_insn (gen_rtx_SET (target, const_vec));
6387 return;
6389 else
6391 /* Load from constant pool. */
6392 emit_move_insn (target, const_vec);
6393 return;
6397 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6398 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6400 rtx op[2];
6401 size_t i;
6402 size_t num_elements = all_same ? 1 : 2;
6403 for (i = 0; i < num_elements; i++)
6405 op[i] = XVECEXP (vals, 0, i);
6406 /* Just in case there is a SUBREG with a smaller mode, do a
6407 conversion. */
6408 if (GET_MODE (op[i]) != inner_mode)
6410 rtx tmp = gen_reg_rtx (inner_mode);
6411 convert_move (tmp, op[i], 0);
6412 op[i] = tmp;
6414 /* Allow load with splat double word. */
6415 else if (MEM_P (op[i]))
6417 if (!all_same)
6418 op[i] = force_reg (inner_mode, op[i]);
6420 else if (!REG_P (op[i]))
6421 op[i] = force_reg (inner_mode, op[i]);
6424 if (all_same)
6426 if (mode == V2DFmode)
6427 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6428 else
6429 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6431 else
6433 if (mode == V2DFmode)
6434 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6435 else
6436 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6438 return;
6441 /* Special case initializing vector int if we are on 64-bit systems with
6442 direct move or we have the ISA 3.0 instructions. */
6443 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6444 && TARGET_DIRECT_MOVE_64BIT)
6446 if (all_same)
6448 rtx element0 = XVECEXP (vals, 0, 0);
6449 if (MEM_P (element0))
6450 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6451 else
6452 element0 = force_reg (SImode, element0);
6454 if (TARGET_P9_VECTOR)
6455 emit_insn (gen_vsx_splat_v4si (target, element0));
6456 else
6458 rtx tmp = gen_reg_rtx (DImode);
6459 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6460 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6462 return;
6464 else
6466 rtx elements[4];
6467 size_t i;
6469 for (i = 0; i < 4; i++)
6470 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6472 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6473 elements[2], elements[3]));
6474 return;
6478 /* With single precision floating point on VSX, we know that internally single
6479 precision is actually represented as a double, and either make 2 V2DF
6480 vectors, and convert these vectors to single precision, or do one
6481 conversion, and splat the result to the other elements. */
6482 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6484 if (all_same)
6486 rtx element0 = XVECEXP (vals, 0, 0);
6488 if (TARGET_P9_VECTOR)
6490 if (MEM_P (element0))
6491 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6493 emit_insn (gen_vsx_splat_v4sf (target, element0));
6496 else
6498 rtx freg = gen_reg_rtx (V4SFmode);
6499 rtx sreg = force_reg (SFmode, element0);
6500 rtx cvt = (TARGET_XSCVDPSPN
6501 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6502 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6504 emit_insn (cvt);
6505 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6506 const0_rtx));
6509 else
6511 rtx dbl_even = gen_reg_rtx (V2DFmode);
6512 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6513 rtx flt_even = gen_reg_rtx (V4SFmode);
6514 rtx flt_odd = gen_reg_rtx (V4SFmode);
6515 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6516 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6517 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6518 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6520 /* Use VMRGEW if we can instead of doing a permute. */
6521 if (TARGET_P8_VECTOR)
6523 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6524 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6525 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6526 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6527 if (BYTES_BIG_ENDIAN)
6528 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6529 else
6530 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6532 else
6534 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6535 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6536 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6537 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6538 rs6000_expand_extract_even (target, flt_even, flt_odd);
6541 return;
6544 /* Special case initializing vector short/char that are splats if we are on
6545 64-bit systems with direct move. */
6546 if (all_same && TARGET_DIRECT_MOVE_64BIT
6547 && (mode == V16QImode || mode == V8HImode))
6549 rtx op0 = XVECEXP (vals, 0, 0);
6550 rtx di_tmp = gen_reg_rtx (DImode);
6552 if (!REG_P (op0))
6553 op0 = force_reg (GET_MODE_INNER (mode), op0);
6555 if (mode == V16QImode)
6557 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6558 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6559 return;
6562 if (mode == V8HImode)
6564 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6565 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6566 return;
6570 /* Store value to stack temp. Load vector element. Splat. However, splat
6571 of 64-bit items is not supported on Altivec. */
6572 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6574 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6575 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6576 XVECEXP (vals, 0, 0));
6577 x = gen_rtx_UNSPEC (VOIDmode,
6578 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6579 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6580 gen_rtvec (2,
6581 gen_rtx_SET (target, mem),
6582 x)));
6583 x = gen_rtx_VEC_SELECT (inner_mode, target,
6584 gen_rtx_PARALLEL (VOIDmode,
6585 gen_rtvec (1, const0_rtx)));
6586 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6587 return;
6590 /* One field is non-constant. Load constant then overwrite
6591 varying field. */
6592 if (n_var == 1)
6594 rtx copy = copy_rtx (vals);
6596 /* Load constant part of vector, substitute neighboring value for
6597 varying element. */
6598 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6599 rs6000_expand_vector_init (target, copy);
6601 /* Insert variable. */
6602 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6603 return;
6606 /* Construct the vector in memory one field at a time
6607 and load the whole vector. */
6608 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6609 for (i = 0; i < n_elts; i++)
6610 emit_move_insn (adjust_address_nv (mem, inner_mode,
6611 i * GET_MODE_SIZE (inner_mode)),
6612 XVECEXP (vals, 0, i));
6613 emit_move_insn (target, mem);
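/* Usage sketch (illustrative, not part of the original source):
   initializing V2DFmode from two distinct doubles takes the VSX path
   above and emits a single gen_vsx_concat_v2df (an xxpermdi); the final
   stack-temp loop is only reached when no special case applies, e.g. a
   fully variable V8HImode build without direct move support.  */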
6616 /* Set field ELT of TARGET to VAL. */
6618 void
6619 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6621 machine_mode mode = GET_MODE (target);
6622 machine_mode inner_mode = GET_MODE_INNER (mode);
6623 rtx reg = gen_reg_rtx (mode);
6624 rtx mask, mem, x;
6625 int width = GET_MODE_SIZE (inner_mode);
6626 int i;
6628 val = force_reg (GET_MODE (val), val);
6630 if (VECTOR_MEM_VSX_P (mode))
6632 rtx insn = NULL_RTX;
6633 rtx elt_rtx = GEN_INT (elt);
6635 if (mode == V2DFmode)
6636 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6638 else if (mode == V2DImode)
6639 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6641 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6643 if (mode == V4SImode)
6644 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6645 else if (mode == V8HImode)
6646 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6647 else if (mode == V16QImode)
6648 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6649 else if (mode == V4SFmode)
6650 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6653 if (insn)
6655 emit_insn (insn);
6656 return;
6660 /* Simplify setting single element vectors like V1TImode. */
6661 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6663 emit_move_insn (target, gen_lowpart (mode, val));
6664 return;
6667 /* Load single variable value. */
6668 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6669 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6670 x = gen_rtx_UNSPEC (VOIDmode,
6671 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6672 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6673 gen_rtvec (2,
6674 gen_rtx_SET (reg, mem),
6675 x)));
6677 /* Linear sequence. */
6678 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6679 for (i = 0; i < 16; ++i)
6680 XVECEXP (mask, 0, i) = GEN_INT (i);
6682 /* Set permute mask to insert element into target. */
6683 for (i = 0; i < width; ++i)
6684 XVECEXP (mask, 0, elt*width + i)
6685 = GEN_INT (i + 0x10);
6686 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6688 if (BYTES_BIG_ENDIAN)
6689 x = gen_rtx_UNSPEC (mode,
6690 gen_rtvec (3, target, reg,
6691 force_reg (V16QImode, x)),
6692 UNSPEC_VPERM);
6693 else
6695 if (TARGET_P9_VECTOR)
6696 x = gen_rtx_UNSPEC (mode,
6697 gen_rtvec (3, reg, target,
6698 force_reg (V16QImode, x)),
6699 UNSPEC_VPERMR);
6700 else
6702 /* Invert selector. We prefer to generate VNAND on P8 so
6703 that future fusion opportunities can kick in, but must
6704 generate VNOR elsewhere. */
6705 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6706 rtx iorx = (TARGET_P8_VECTOR
6707 ? gen_rtx_IOR (V16QImode, notx, notx)
6708 : gen_rtx_AND (V16QImode, notx, notx));
6709 rtx tmp = gen_reg_rtx (V16QImode);
6710 emit_insn (gen_rtx_SET (tmp, iorx));
6712 /* Permute with operands reversed and adjusted selector. */
6713 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6714 UNSPEC_VPERM);
6718 emit_insn (gen_rtx_SET (target, x));
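/* Mask example (illustrative, not part of the original source): setting
   element 2 of a V4SImode target builds the byte selector
   { 0,...,7, 0x10,0x11,0x12,0x13, 12,...,15 }, so the permute keeps the
   target's bytes except bytes 8-11, which come from the register
   holding the new value.  */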
6721 /* Extract field ELT from VEC into TARGET. */
6723 void
6724 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6726 machine_mode mode = GET_MODE (vec);
6727 machine_mode inner_mode = GET_MODE_INNER (mode);
6728 rtx mem;
6730 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6732 switch (mode)
6734 default:
6735 break;
6736 case E_V1TImode:
6737 emit_move_insn (target, gen_lowpart (TImode, vec));
6738 break;
6739 case E_V2DFmode:
6740 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6741 return;
6742 case E_V2DImode:
6743 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6744 return;
6745 case E_V4SFmode:
6746 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6747 return;
6748 case E_V16QImode:
6749 if (TARGET_DIRECT_MOVE_64BIT)
6751 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6752 return;
6754 else
6755 break;
6756 case E_V8HImode:
6757 if (TARGET_DIRECT_MOVE_64BIT)
6759 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6760 return;
6762 else
6763 break;
6764 case E_V4SImode:
6765 if (TARGET_DIRECT_MOVE_64BIT)
6767 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6768 return;
6770 break;
6773 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6774 && TARGET_DIRECT_MOVE_64BIT)
6776 if (GET_MODE (elt) != DImode)
6778 rtx tmp = gen_reg_rtx (DImode);
6779 convert_move (tmp, elt, 0);
6780 elt = tmp;
6782 else if (!REG_P (elt))
6783 elt = force_reg (DImode, elt);
6785 switch (mode)
6787 case E_V1TImode:
6788 emit_move_insn (target, gen_lowpart (TImode, vec));
6789 return;
6791 case E_V2DFmode:
6792 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6793 return;
6795 case E_V2DImode:
6796 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6797 return;
6799 case E_V4SFmode:
6800 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6801 return;
6803 case E_V4SImode:
6804 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6805 return;
6807 case E_V8HImode:
6808 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6809 return;
6811 case E_V16QImode:
6812 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6813 return;
6815 default:
6816 gcc_unreachable ();
6820 /* Allocate mode-sized buffer. */
6821 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6823 emit_move_insn (mem, vec);
6824 if (CONST_INT_P (elt))
6826 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6828 /* Add offset to field within buffer matching vector element. */
6829 mem = adjust_address_nv (mem, inner_mode,
6830 modulo_elt * GET_MODE_SIZE (inner_mode));
6831 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6833 else
6835 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6836 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6837 rtx new_addr = gen_reg_rtx (Pmode);
6839 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6840 if (ele_size > 1)
6841 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6842 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6843 new_addr = change_address (mem, inner_mode, new_addr);
6844 emit_move_insn (target, new_addr);
6848 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6849 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6850 temporary (BASE_TMP) to fix up the address. Return the new memory address
6851 that is valid for reads or writes to a given register (SCALAR_REG). */
6853 static rtx
6854 rs6000_adjust_vec_address (rtx scalar_reg,
6855 rtx mem,
6856 rtx element,
6857 rtx base_tmp,
6858 machine_mode scalar_mode)
6860 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6861 rtx addr = XEXP (mem, 0);
6862 rtx element_offset;
6863 rtx new_addr;
6864 bool valid_addr_p;
6866 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6867 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6869 /* Calculate what we need to add to the address to get the element
6870 address. */
6871 if (CONST_INT_P (element))
6872 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6873 else
6875 int byte_shift = exact_log2 (scalar_size);
6876 gcc_assert (byte_shift >= 0);
6878 if (byte_shift == 0)
6879 element_offset = element;
6881 else
6883 if (TARGET_POWERPC64)
6884 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6885 else
6886 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6888 element_offset = base_tmp;
6892 /* Create the new address pointing to the element within the vector. If we
6893 are adding 0, we don't have to change the address. */
6894 if (element_offset == const0_rtx)
6895 new_addr = addr;
6897 /* A simple indirect address can be converted into a reg + offset
6898 address. */
6899 else if (REG_P (addr) || SUBREG_P (addr))
6900 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
6902 /* Optimize D-FORM addresses with a constant offset and a constant element,
6903 to include the element offset in the address directly. */
6904 else if (GET_CODE (addr) == PLUS)
6906 rtx op0 = XEXP (addr, 0);
6907 rtx op1 = XEXP (addr, 1);
6908 rtx insn;
6910 gcc_assert (REG_P (op0) || SUBREG_P (op0));
6911 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
6913 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
6914 rtx offset_rtx = GEN_INT (offset);
6916 if (IN_RANGE (offset, -32768, 32767)
6917 && (scalar_size < 8 || (offset & 0x3) == 0))
6918 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
6919 else
6921 emit_move_insn (base_tmp, offset_rtx);
6922 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6925 else
6927 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
6928 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
6930 /* Note, ADDI requires the register being added to be a base
6931 register. If the register was R0, load it up into the temporary
6932 and do the add. */
6933 if (op1_reg_p
6934 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
6936 insn = gen_add3_insn (base_tmp, op1, element_offset);
6937 gcc_assert (insn != NULL_RTX);
6938 emit_insn (insn);
6941 else if (ele_reg_p
6942 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
6944 insn = gen_add3_insn (base_tmp, element_offset, op1);
6945 gcc_assert (insn != NULL_RTX);
6946 emit_insn (insn);
6949 else
6951 emit_move_insn (base_tmp, op1);
6952 emit_insn (gen_add2_insn (base_tmp, element_offset));
6955 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6959 else
6961 emit_move_insn (base_tmp, addr);
6962 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
6965 /* If we have a PLUS, we need to see whether the particular register class
6966 allows for D-FORM or X-FORM addressing. */
6967 if (GET_CODE (new_addr) == PLUS)
6969 rtx op1 = XEXP (new_addr, 1);
6970 addr_mask_type addr_mask;
6971 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
6973 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
6974 if (INT_REGNO_P (scalar_regno))
6975 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
6977 else if (FP_REGNO_P (scalar_regno))
6978 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
6980 else if (ALTIVEC_REGNO_P (scalar_regno))
6981 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
6983 else
6984 gcc_unreachable ();
6986 if (REG_P (op1) || SUBREG_P (op1))
6987 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
6988 else
6989 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
6992 else if (REG_P (new_addr) || SUBREG_P (new_addr))
6993 valid_addr_p = true;
6995 else
6996 valid_addr_p = false;
6998 if (!valid_addr_p)
7000 emit_move_insn (base_tmp, new_addr);
7001 new_addr = base_tmp;
7004 return change_address (mem, scalar_mode, new_addr);
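/* Offset example (illustrative, not part of the original source):
   extracting constant element 3 of a V4SImode vector addressed as
   (plus r9 16) computes element_offset = 3 * 4 = 12 and folds it into
   the displacement, giving (plus r9 28), since 28 is within the signed
   16-bit D-form range.  */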
7007 /* Split a variable vec_extract operation into the component instructions. */
7009 void
7010 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7011 rtx tmp_altivec)
7013 machine_mode mode = GET_MODE (src);
7014 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7015 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7016 int byte_shift = exact_log2 (scalar_size);
7018 gcc_assert (byte_shift >= 0);
7020 /* If we are given a memory address, optimize to load just the element. We
7021 don't have to adjust the vector element number on little endian
7022 systems. */
7023 if (MEM_P (src))
7025 int num_elements = GET_MODE_NUNITS (mode);
7026 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7028 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7029 gcc_assert (REG_P (tmp_gpr));
7030 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7031 tmp_gpr, scalar_mode));
7032 return;
7035 else if (REG_P (src) || SUBREG_P (src))
7037 int num_elements = GET_MODE_NUNITS (mode);
7038 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7039 int bit_shift = 7 - exact_log2 (num_elements);
7040 rtx element2;
7041 unsigned int dest_regno = reg_or_subregno (dest);
7042 unsigned int src_regno = reg_or_subregno (src);
7043 unsigned int element_regno = reg_or_subregno (element);
7045 gcc_assert (REG_P (tmp_gpr));
7047 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7048 a general purpose register. */
7049 if (TARGET_P9_VECTOR
7050 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7051 && INT_REGNO_P (dest_regno)
7052 && ALTIVEC_REGNO_P (src_regno)
7053 && INT_REGNO_P (element_regno))
7055 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7056 rtx element_si = gen_rtx_REG (SImode, element_regno);
7058 if (mode == V16QImode)
7059 emit_insn (BYTES_BIG_ENDIAN
7060 ? gen_vextublx (dest_si, element_si, src)
7061 : gen_vextubrx (dest_si, element_si, src));
7063 else if (mode == V8HImode)
7065 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7066 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7067 emit_insn (BYTES_BIG_ENDIAN
7068 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7069 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7073 else
7075 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7076 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7077 emit_insn (BYTES_BIG_ENDIAN
7078 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7079 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7082 return;
7086 gcc_assert (REG_P (tmp_altivec));
7088 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7089 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7090 will shift the element into the upper position (adding 3 to convert a
7091 byte shift into a bit shift). */
7092 if (scalar_size == 8)
7094 if (!BYTES_BIG_ENDIAN)
7096 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7097 element2 = tmp_gpr;
7099 else
7100 element2 = element;
7102 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7103 bit. */
7104 emit_insn (gen_rtx_SET (tmp_gpr,
7105 gen_rtx_AND (DImode,
7106 gen_rtx_ASHIFT (DImode,
7107 element2,
7108 GEN_INT (6)),
7109 GEN_INT (64))));
7111 else
7113 if (!BYTES_BIG_ENDIAN)
7115 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7117 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7118 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7119 element2 = tmp_gpr;
7121 else
7122 element2 = element;
7124 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7127 /* Get the value into the lower byte of the Altivec register where VSLO
7128 expects it. */
7129 if (TARGET_P9_VECTOR)
7130 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7131 else if (can_create_pseudo_p ())
7132 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7133 else
7135 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7136 emit_move_insn (tmp_di, tmp_gpr);
7137 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7140 /* Do the VSLO to get the value into the final location. */
7141 switch (mode)
7143 case E_V2DFmode:
7144 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7145 return;
7147 case E_V2DImode:
7148 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7149 return;
7151 case E_V4SFmode:
7153 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7154 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7155 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7156 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7157 tmp_altivec));
7159 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7160 return;
7163 case E_V4SImode:
7164 case E_V8HImode:
7165 case E_V16QImode:
7167 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7168 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7169 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7170 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7171 tmp_altivec));
7172 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7173 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7174 GEN_INT (64 - bits_in_element)));
7175 return;
7178 default:
7179 gcc_unreachable ();
7182 return;
7184 else
7185 gcc_unreachable ();
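/* Shift example (illustrative, not part of the original source): for
   V4SImode, bit_shift = 7 - log2 (4) = 5, so element index i becomes
   i * 32 = (i * 4 bytes) * 8 bits, the bit count VSLO expects; the
   scalar_size == 8 path forms the same quantity with a shift left
   by 6.  */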
7188 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7189 selects whether the alignment being applied is the ABI-mandated
7190 alignment, the optional alignment, or both. */
7192 unsigned int
7193 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7195 if (how != align_opt)
7197 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7198 align = 128;
7201 if (how != align_abi)
7203 if (TREE_CODE (type) == ARRAY_TYPE
7204 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7206 if (align < BITS_PER_WORD)
7207 align = BITS_PER_WORD;
7211 return align;
7214 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7215 instructions simply ignore the low bits; VSX memory instructions
7216 are aligned to 4 or 8 bytes. */
7218 static bool
7219 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7221 return (STRICT_ALIGNMENT
7222 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7223 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7224 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7225 && (int) align < VECTOR_ALIGN (mode)))));
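/* For example, without TARGET_EFFICIENT_UNALIGNED_VSX a DFmode access
   known only to be 2-byte aligned (align == 16 < 32) is reported slow,
   as is a V4SImode access with align below VECTOR_ALIGN (V4SImode);
   with STRICT_ALIGNMENT the hook reports slow unconditionally.  */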
7228 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7230 bool
7231 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7233 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7235 if (computed != 128)
7237 static bool warned;
7238 if (!warned && warn_psabi)
7240 warned = true;
7241 inform (input_location,
7242 "the layout of aggregates containing vectors with"
7243 " %d-byte alignment has changed in GCC 5",
7244 computed / BITS_PER_UNIT);
7247 /* In current GCC there is no special case. */
7248 return false;
7251 return false;
7254 /* AIX increases natural record alignment to doubleword if the first
7255 field is an FP double while the FP fields remain word aligned. */
7257 unsigned int
7258 rs6000_special_round_type_align (tree type, unsigned int computed,
7259 unsigned int specified)
7261 unsigned int align = MAX (computed, specified);
7262 tree field = TYPE_FIELDS (type);
7264 /* Skip all non-field decls. */
7265 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7266 field = DECL_CHAIN (field);
7268 if (field != NULL && field != type)
7270 type = TREE_TYPE (field);
7271 while (TREE_CODE (type) == ARRAY_TYPE)
7272 type = TREE_TYPE (type);
7274 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7275 align = MAX (align, 64);
7278 return align;
7281 /* Darwin increases record alignment to the natural alignment of
7282 the first field. */
7284 unsigned int
7285 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7286 unsigned int specified)
7288 unsigned int align = MAX (computed, specified);
7290 if (TYPE_PACKED (type))
7291 return align;
7293 /* Find the first field, looking down into aggregates. */
7294 do {
7295 tree field = TYPE_FIELDS (type);
7296 /* Skip all non-field decls. */
7297 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7298 field = DECL_CHAIN (field);
7299 if (! field)
7300 break;
7301 /* A packed field does not contribute any extra alignment. */
7302 if (DECL_PACKED (field))
7303 return align;
7304 type = TREE_TYPE (field);
7305 while (TREE_CODE (type) == ARRAY_TYPE)
7306 type = TREE_TYPE (type);
7307 } while (AGGREGATE_TYPE_P (type));
7309 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7310 align = MAX (align, TYPE_ALIGN (type));
7312 return align;
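/* For instance, a Darwin struct whose first field is a double, e.g.

     struct { double d; int i; };

   is raised to 64-bit alignment, while marking the struct or its first
   field packed leaves the computed alignment untouched.  */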
7315 /* Return 1 for an operand in small memory on V.4/eabi. */
7318 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7319 machine_mode mode ATTRIBUTE_UNUSED)
7321 #if TARGET_ELF
7322 rtx sym_ref;
7324 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7325 return 0;
7327 if (DEFAULT_ABI != ABI_V4)
7328 return 0;
7330 if (SYMBOL_REF_P (op))
7331 sym_ref = op;
7333 else if (GET_CODE (op) != CONST
7334 || GET_CODE (XEXP (op, 0)) != PLUS
7335 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7336 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7337 return 0;
7339 else
7341 rtx sum = XEXP (op, 0);
7342 HOST_WIDE_INT summand;
7344 /* We have to be careful here, because it is the referenced address
7345 that must be within 32k of _SDA_BASE_, not just the symbol. */
7346 summand = INTVAL (XEXP (sum, 1));
7347 if (summand < 0 || summand > g_switch_value)
7348 return 0;
7350 sym_ref = XEXP (sum, 0);
7353 return SYMBOL_REF_SMALL_P (sym_ref);
7354 #else
7355 return 0;
7356 #endif
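/* Illustrative operands this predicate accepts once the symbol has been
   marked small (SYMBOL_REF_SMALL_P) and small data is enabled on
   V.4/eabi:

     (symbol_ref "x")
     (const (plus (symbol_ref "x") (const_int 8)))

   where the constant term must lie in [0, g_switch_value] so the
   referenced address stays within reach of _SDA_BASE_.  */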
7359 /* Return true if either operand is a general purpose register. */
7361 bool
7362 gpr_or_gpr_p (rtx op0, rtx op1)
7364 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7365 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7368 /* Return true if this is a move direct operation between GPR registers and
7369 floating point/VSX registers. */
7371 bool
7372 direct_move_p (rtx op0, rtx op1)
7374 if (!REG_P (op0) || !REG_P (op1))
7375 return false;
7377 if (!TARGET_DIRECT_MOVE)
7378 return false;
7380 int regno0 = REGNO (op0);
7381 int regno1 = REGNO (op1);
7382 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7383 return false;
7385 if (INT_REGNO_P (regno0) && VSX_REGNO_P (regno1))
7386 return true;
7388 if (VSX_REGNO_P (regno0) && INT_REGNO_P (regno1))
7389 return true;
7391 return false;
7394 /* Return true if the ADDR is an acceptable address for a quad memory
7395 operation of mode MODE (either LQ/STQ for general purpose registers, or
7396 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7397 registers in the address must satisfy the strict (post-reload) form of
7398 register checking, as in legitimate_indirect_address_p. */
7400 bool
7401 quad_address_p (rtx addr, machine_mode mode, bool strict)
7403 rtx op0, op1;
7405 if (GET_MODE_SIZE (mode) != 16)
7406 return false;
7408 if (legitimate_indirect_address_p (addr, strict))
7409 return true;
7411 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7412 return false;
7414 if (GET_CODE (addr) != PLUS)
7415 return false;
7417 op0 = XEXP (addr, 0);
7418 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7419 return false;
7421 op1 = XEXP (addr, 1);
7422 if (!CONST_INT_P (op1))
7423 return false;
7425 return quad_address_offset_p (INTVAL (op1));
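/* A sketch of the addresses accepted here (for V2DImode with ISA 3.0
   vector d-form enabled, non-strict):

     (reg:DI 3)                            register indirect
     (plus:DI (reg:DI 3) (const_int 32))   DQ-form offset

   (plus (reg) (const_int 17)) fails quad_address_offset_p, since the
   offset must be a multiple of 16, and (plus (reg) (reg)) never
   matches here.  */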
7428 /* Return true if this is a load or store quad operation. This function does
7429 not handle the atomic quad memory instructions. */
7431 bool
7432 quad_load_store_p (rtx op0, rtx op1)
7434 bool ret;
7436 if (!TARGET_QUAD_MEMORY)
7437 ret = false;
7439 else if (REG_P (op0) && MEM_P (op1))
7440 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7441 && quad_memory_operand (op1, GET_MODE (op1))
7442 && !reg_overlap_mentioned_p (op0, op1));
7444 else if (MEM_P (op0) && REG_P (op1))
7445 ret = (quad_memory_operand (op0, GET_MODE (op0))
7446 && quad_int_reg_operand (op1, GET_MODE (op1)));
7448 else
7449 ret = false;
7451 if (TARGET_DEBUG_ADDR)
7453 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7454 ret ? "true" : "false");
7455 debug_rtx (gen_rtx_SET (op0, op1));
7458 return ret;
7461 /* Given an address, return a constant offset term if one exists. */
7463 static rtx
7464 address_offset (rtx op)
7466 if (GET_CODE (op) == PRE_INC
7467 || GET_CODE (op) == PRE_DEC)
7468 op = XEXP (op, 0);
7469 else if (GET_CODE (op) == PRE_MODIFY
7470 || GET_CODE (op) == LO_SUM)
7471 op = XEXP (op, 1);
7473 if (GET_CODE (op) == CONST)
7474 op = XEXP (op, 0);
7476 if (GET_CODE (op) == PLUS)
7477 op = XEXP (op, 1);
7479 if (CONST_INT_P (op))
7480 return op;
7482 return NULL_RTX;
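/* For example:

     address_offset ((plus (reg) (const_int 8)))          -> (const_int 8)
     address_offset ((lo_sum (reg)
        (const (plus (symbol_ref "s") (const_int 4)))))   -> (const_int 4)
     address_offset ((reg))                               -> NULL_RTX  */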
7485 /* Return true if the MEM operand is a memory operand suitable for use
7486 with a (full width, possibly multiple) gpr load/store. On
7487 powerpc64 this means the offset must be divisible by 4.
7488 Implements 'Y' constraint.
7490 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7491 a constraint function we know the operand has satisfied a suitable
7492 memory predicate.
7494 Offsetting a lo_sum should not be allowed, except where we know by
7495 alignment that a 32k boundary is not crossed. Note that by
7496 "offsetting" here we mean a further offset to access parts of the
7497 MEM. It's fine to have a lo_sum where the inner address is offset
7498 from a sym, since the same sym+offset will appear in the high part
7499 of the address calculation. */
7501 bool
7502 mem_operand_gpr (rtx op, machine_mode mode)
7504 unsigned HOST_WIDE_INT offset;
7505 int extra;
7506 rtx addr = XEXP (op, 0);
7508 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7509 if (TARGET_UPDATE
7510 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7511 && mode_supports_pre_incdec_p (mode)
7512 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7513 return true;
7515 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7516 if (!rs6000_offsettable_memref_p (op, mode, false))
7517 return false;
7519 op = address_offset (addr);
7520 if (op == NULL_RTX)
7521 return true;
7523 offset = INTVAL (op);
7524 if (TARGET_POWERPC64 && (offset & 3) != 0)
7525 return false;
7527 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7528 if (extra < 0)
7529 extra = 0;
7531 if (GET_CODE (addr) == LO_SUM)
7532 /* For lo_sum addresses, we must allow any offset except one that
7533 causes a wrap, so test only the low 16 bits. */
7534 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7536 return offset + 0x8000 < 0x10000u - extra;
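/* A worked example of the final range test: for DImode on a 32-bit
   target, EXTRA is 8 - 4 == 4, so offsets in [-0x8000, 0x7fff - 4] pass
   and the second word of the access at offset + 4 still has a
   representable 16-bit displacement.  On powerpc64 the offset must also
   be a multiple of 4 for the DS instruction form, as checked above.  */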
7539 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7540 enforce an offset divisible by 4 even for 32-bit. */
7542 bool
7543 mem_operand_ds_form (rtx op, machine_mode mode)
7545 unsigned HOST_WIDE_INT offset;
7546 int extra;
7547 rtx addr = XEXP (op, 0);
7549 if (!offsettable_address_p (false, mode, addr))
7550 return false;
7552 op = address_offset (addr);
7553 if (op == NULL_RTX)
7554 return true;
7556 offset = INTVAL (op);
7557 if ((offset & 3) != 0)
7558 return false;
7560 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7561 if (extra < 0)
7562 extra = 0;
7564 if (GET_CODE (addr) == LO_SUM)
7565 /* For lo_sum addresses, we must allow any offset except one that
7566 causes a wrap, so test only the low 16 bits. */
7567 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7569 return offset + 0x8000 < 0x10000u - extra;
7572 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7574 static bool
7575 reg_offset_addressing_ok_p (machine_mode mode)
7577 switch (mode)
7579 case E_V16QImode:
7580 case E_V8HImode:
7581 case E_V4SFmode:
7582 case E_V4SImode:
7583 case E_V2DFmode:
7584 case E_V2DImode:
7585 case E_V1TImode:
7586 case E_TImode:
7587 case E_TFmode:
7588 case E_KFmode:
7589 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7590 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7591 a vector mode, if we want to use the VSX registers to move it around,
7592 we need to restrict ourselves to reg+reg addressing. Similarly for
7593 IEEE 128-bit floating point that is passed in a single vector
7594 register. */
7595 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7596 return mode_supports_dq_form (mode);
7597 break;
7599 case E_SDmode:
7600 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7601 addressing for the LFIWZX and STFIWX instructions. */
7602 if (TARGET_NO_SDMODE_STACK)
7603 return false;
7604 break;
7606 default:
7607 break;
7610 return true;
7613 static bool
7614 virtual_stack_registers_memory_p (rtx op)
7616 int regnum;
7618 if (REG_P (op))
7619 regnum = REGNO (op);
7621 else if (GET_CODE (op) == PLUS
7622 && REG_P (XEXP (op, 0))
7623 && CONST_INT_P (XEXP (op, 1)))
7624 regnum = REGNO (XEXP (op, 0));
7626 else
7627 return false;
7629 return (regnum >= FIRST_VIRTUAL_REGISTER
7630 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7633 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7634 is known to not straddle a 32k boundary. This function is used
7635 to determine whether -mcmodel=medium code can use TOC pointer
7636 relative addressing for OP. This means the alignment of the TOC
7637 pointer must also be taken into account, and unfortunately that is
7638 only 8 bytes. */
7640 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7641 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7642 #endif
7644 static bool
7645 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7646 machine_mode mode)
7648 tree decl;
7649 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7651 if (!SYMBOL_REF_P (op))
7652 return false;
7654 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7655 SYMBOL_REF. */
7656 if (mode_supports_dq_form (mode))
7657 return false;
7659 dsize = GET_MODE_SIZE (mode);
7660 decl = SYMBOL_REF_DECL (op);
7661 if (!decl)
7663 if (dsize == 0)
7664 return false;
7666 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7667 replacing memory addresses with an anchor plus offset. We
7668 could find the decl by rummaging around in the block->objects
7669 VEC for the given offset but that seems like too much work. */
7670 dalign = BITS_PER_UNIT;
7671 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7672 && SYMBOL_REF_ANCHOR_P (op)
7673 && SYMBOL_REF_BLOCK (op) != NULL)
7675 struct object_block *block = SYMBOL_REF_BLOCK (op);
7677 dalign = block->alignment;
7678 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7680 else if (CONSTANT_POOL_ADDRESS_P (op))
7682 /* It would be nice to have get_pool_align()... */
7683 machine_mode cmode = get_pool_mode (op);
7685 dalign = GET_MODE_ALIGNMENT (cmode);
7688 else if (DECL_P (decl))
7690 dalign = DECL_ALIGN (decl);
7692 if (dsize == 0)
7694 /* Allow BLKmode when the entire object is known to not
7695 cross a 32k boundary. */
7696 if (!DECL_SIZE_UNIT (decl))
7697 return false;
7699 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7700 return false;
7702 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7703 if (dsize > 32768)
7704 return false;
7706 dalign /= BITS_PER_UNIT;
7707 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7708 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7709 return dalign >= dsize;
7712 else
7713 gcc_unreachable ();
7715 /* Find how many bits of the alignment we know for this access. */
7716 dalign /= BITS_PER_UNIT;
7717 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7718 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7719 mask = dalign - 1;
7720 lsb = offset & -offset;
7721 mask &= lsb - 1;
7722 dalign = mask + 1;
7724 return dalign >= dsize;
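/* A worked example of the mask arithmetic above: with a decl aligned to
   16 bytes, DALIGN is first capped at POWERPC64_TOC_POINTER_ALIGNMENT
   (8).  At OFFSET == 0 the known alignment stays 8 and an 8-byte access
   passes; at OFFSET == 4 the lowest set bit limits the known alignment
   to 4, so the same access might straddle a 32k boundary and is
   rejected.  */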
7727 static bool
7728 constant_pool_expr_p (rtx op)
7730 rtx base, offset;
7732 split_const (op, &base, &offset);
7733 return (SYMBOL_REF_P (base)
7734 && CONSTANT_POOL_ADDRESS_P (base)
7735 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7738 /* Create a TOC reference for symbol_ref SYMBOL. If LARGETOC_REG is non-null,
7739 use that as the register to put the HIGH value into if register allocation
7740 is already done. */
7743 create_TOC_reference (rtx symbol, rtx largetoc_reg)
7745 rtx tocrel, tocreg, hi;
7747 if (TARGET_DEBUG_ADDR)
7749 if (SYMBOL_REF_P (symbol))
7750 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
7751 XSTR (symbol, 0));
7752 else
7754 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
7755 GET_RTX_NAME (GET_CODE (symbol)));
7756 debug_rtx (symbol);
7760 if (!can_create_pseudo_p ())
7761 df_set_regs_ever_live (TOC_REGISTER, true);
7763 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
7764 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
7765 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
7766 return tocrel;
7768 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
7769 if (largetoc_reg != NULL)
7771 emit_move_insn (largetoc_reg, hi);
7772 hi = largetoc_reg;
7774 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
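/* The RTL produced above, roughly (Pmode shown as DI): for
   -mcmodel=small, or while pseudos may still be created, just

     (unspec:DI [(symbol_ref) (reg:DI 2)] UNSPEC_TOCREL)

   and otherwise the split form

     (lo_sum:DI (high:DI (unspec ...)) (unspec ...))

   with the HIGH value moved into LARGETOC_REG when one is supplied.  */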
7777 /* These are only used to pass through from print_operand/print_operand_address
7778 to rs6000_output_addr_const_extra over the intervening function
7779 output_addr_const which is not target code. */
7780 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7782 /* Return true if OP is a toc pointer relative address (the output
7783 of create_TOC_reference). If STRICT, do not match non-split
7784 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7785 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7786 TOCREL_OFFSET_RET respectively. */
7788 bool
7789 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7790 const_rtx *tocrel_offset_ret)
7792 if (!TARGET_TOC)
7793 return false;
7795 if (TARGET_CMODEL != CMODEL_SMALL)
7797 /* When strict, ensure we have everything tidy. */
7798 if (strict
7799 && !(GET_CODE (op) == LO_SUM
7800 && REG_P (XEXP (op, 0))
7801 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7802 return false;
7804 /* When not strict, allow non-split TOC addresses and also allow
7805 (lo_sum (high ..)) TOC addresses created during reload. */
7806 if (GET_CODE (op) == LO_SUM)
7807 op = XEXP (op, 1);
7810 const_rtx tocrel_base = op;
7811 const_rtx tocrel_offset = const0_rtx;
7813 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7815 tocrel_base = XEXP (op, 0);
7816 tocrel_offset = XEXP (op, 1);
7819 if (tocrel_base_ret)
7820 *tocrel_base_ret = tocrel_base;
7821 if (tocrel_offset_ret)
7822 *tocrel_offset_ret = tocrel_offset;
7824 return (GET_CODE (tocrel_base) == UNSPEC
7825 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7826 && REG_P (XVECEXP (tocrel_base, 0, 1))
7827 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7830 /* Return true if X is a constant pool address, and also for cmodel=medium
7831 if X is a toc-relative address known to be offsettable within MODE. */
7833 bool
7834 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7835 bool strict)
7837 const_rtx tocrel_base, tocrel_offset;
7838 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7839 && (TARGET_CMODEL != CMODEL_MEDIUM
7840 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7841 || mode == QImode
7842 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7843 INTVAL (tocrel_offset), mode)));
7846 static bool
7847 legitimate_small_data_p (machine_mode mode, rtx x)
7849 return (DEFAULT_ABI == ABI_V4
7850 && !flag_pic && !TARGET_TOC
7851 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7852 && small_data_operand (x, mode));
7855 bool
7856 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7857 bool strict, bool worst_case)
7859 unsigned HOST_WIDE_INT offset;
7860 unsigned int extra;
7862 if (GET_CODE (x) != PLUS)
7863 return false;
7864 if (!REG_P (XEXP (x, 0)))
7865 return false;
7866 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7867 return false;
7868 if (mode_supports_dq_form (mode))
7869 return quad_address_p (x, mode, strict);
7870 if (!reg_offset_addressing_ok_p (mode))
7871 return virtual_stack_registers_memory_p (x);
7872 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7873 return true;
7874 if (!CONST_INT_P (XEXP (x, 1)))
7875 return false;
7877 offset = INTVAL (XEXP (x, 1));
7878 extra = 0;
7879 switch (mode)
7881 case E_DFmode:
7882 case E_DDmode:
7883 case E_DImode:
7884 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7885 addressing. */
7886 if (VECTOR_MEM_VSX_P (mode))
7887 return false;
7889 if (!worst_case)
7890 break;
7891 if (!TARGET_POWERPC64)
7892 extra = 4;
7893 else if (offset & 3)
7894 return false;
7895 break;
7897 case E_TFmode:
7898 case E_IFmode:
7899 case E_KFmode:
7900 case E_TDmode:
7901 case E_TImode:
7902 case E_PTImode:
7903 extra = 8;
7904 if (!worst_case)
7905 break;
7906 if (!TARGET_POWERPC64)
7907 extra = 12;
7908 else if (offset & 3)
7909 return false;
7910 break;
7912 default:
7913 break;
7916 offset += 0x8000;
7917 return offset < 0x10000 - extra;
7920 bool
7921 legitimate_indexed_address_p (rtx x, int strict)
7923 rtx op0, op1;
7925 if (GET_CODE (x) != PLUS)
7926 return false;
7928 op0 = XEXP (x, 0);
7929 op1 = XEXP (x, 1);
7931 return (REG_P (op0) && REG_P (op1)
7932 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7933 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7934 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7935 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7938 bool
7939 avoiding_indexed_address_p (machine_mode mode)
7941 /* Avoid indexed addressing for modes that have non-indexed
7942 load/store instruction forms. */
7943 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7946 bool
7947 legitimate_indirect_address_p (rtx x, int strict)
7949 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
7952 bool
7953 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7955 if (!TARGET_MACHO || !flag_pic
7956 || mode != SImode || !MEM_P (x))
7957 return false;
7958 x = XEXP (x, 0);
7960 if (GET_CODE (x) != LO_SUM)
7961 return false;
7962 if (!REG_P (XEXP (x, 0)))
7963 return false;
7964 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7965 return false;
7966 x = XEXP (x, 1);
7968 return CONSTANT_P (x);
7971 static bool
7972 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7974 if (GET_CODE (x) != LO_SUM)
7975 return false;
7976 if (!REG_P (XEXP (x, 0)))
7977 return false;
7978 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7979 return false;
7980 /* Quad word (DQ-form) addresses are restricted, and we can't use LO_SUM. */
7981 if (mode_supports_dq_form (mode))
7982 return false;
7983 x = XEXP (x, 1);
7985 if (TARGET_ELF || TARGET_MACHO)
7987 bool large_toc_ok;
7989 if (DEFAULT_ABI == ABI_V4 && flag_pic)
7990 return false;
7991 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
7992 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
7993 recognizes some LO_SUM addresses as valid although this
7994 function says the opposite. In most cases LRA can generate
7995 correct code for address reloads through its own transformations;
7996 only some LO_SUM cases cannot be handled that way, so we need
7997 code here saying that those addresses are still valid. */
7998 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
7999 && small_toc_ref (x, VOIDmode));
8000 if (TARGET_TOC && ! large_toc_ok)
8001 return false;
8002 if (GET_MODE_NUNITS (mode) != 1)
8003 return false;
8004 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8005 && !(/* ??? Assume floating point reg based on mode? */
8006 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8007 return false;
8009 return CONSTANT_P (x) || large_toc_ok;
8012 return false;
8016 /* Try machine-dependent ways of modifying an illegitimate address
8017 to be legitimate. If we find one, return the new, valid address.
8018 This is used from only one place: `memory_address' in explow.c.
8020 OLDX is the address as it was before break_out_memory_refs was
8021 called. In some cases it is useful to look at this to decide what
8022 needs to be done.
8024 It is always safe for this function to do nothing. It exists to
8025 recognize opportunities to optimize the output.
8027 On RS/6000, first check for the sum of a register with a constant
8028 integer that is out of range. If so, generate code to add the
8029 constant with the low-order 16 bits masked to the register and force
8030 this result into another register (this can be done with `cau').
8031 Then generate an address of REG+(CONST&0xffff), allowing for the
8032 possibility of bit 16 being a one.
8034 Then check for the sum of a register and something not constant, try to
8035 load the other things into a register and return the sum. */
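/* As a worked example of the split described above (the lwz is chosen
   only for illustration): legitimizing (reg + 0x1abcd) sign-extends the
   low part to -0x5433 and rounds the high part up to 0x20000, giving

     addis tmp,reg,0x2        # tmp = reg + 0x20000
     lwz   r,-0x5433(tmp)     # effective address reg + 0x1abcd  */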
8037 static rtx
8038 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8039 machine_mode mode)
8041 unsigned int extra;
8043 if (!reg_offset_addressing_ok_p (mode)
8044 || mode_supports_dq_form (mode))
8046 if (virtual_stack_registers_memory_p (x))
8047 return x;
8049 /* In theory we should not be seeing addresses of the form reg+0,
8050 but just in case it is generated, optimize it away. */
8051 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8052 return force_reg (Pmode, XEXP (x, 0));
8054 /* For TImode with load/store quad, restrict addresses to just a single
8055 pointer, so it works with both GPRs and VSX registers. */
8056 /* Make sure both operands are registers. */
8057 else if (GET_CODE (x) == PLUS
8058 && (mode != TImode || !TARGET_VSX))
8059 return gen_rtx_PLUS (Pmode,
8060 force_reg (Pmode, XEXP (x, 0)),
8061 force_reg (Pmode, XEXP (x, 1)));
8062 else
8063 return force_reg (Pmode, x);
8065 if (SYMBOL_REF_P (x))
8067 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8068 if (model != 0)
8069 return rs6000_legitimize_tls_address (x, model);
8072 extra = 0;
8073 switch (mode)
8075 case E_TFmode:
8076 case E_TDmode:
8077 case E_TImode:
8078 case E_PTImode:
8079 case E_IFmode:
8080 case E_KFmode:
8081 /* As in legitimate_offset_address_p we do not assume
8082 worst-case. The mode here is just a hint as to the registers
8083 used. A TImode is usually in gprs, but may actually be in
8084 fprs. Leave worst-case scenario for reload to handle via
8085 insn constraints. PTImode is only GPRs. */
8086 extra = 8;
8087 break;
8088 default:
8089 break;
8092 if (GET_CODE (x) == PLUS
8093 && REG_P (XEXP (x, 0))
8094 && CONST_INT_P (XEXP (x, 1))
8095 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8096 >= 0x10000 - extra))
8098 HOST_WIDE_INT high_int, low_int;
8099 rtx sum;
8100 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8101 if (low_int >= 0x8000 - extra)
8102 low_int = 0;
8103 high_int = INTVAL (XEXP (x, 1)) - low_int;
8104 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8105 GEN_INT (high_int)), 0);
8106 return plus_constant (Pmode, sum, low_int);
8108 else if (GET_CODE (x) == PLUS
8109 && REG_P (XEXP (x, 0))
8110 && !CONST_INT_P (XEXP (x, 1))
8111 && GET_MODE_NUNITS (mode) == 1
8112 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8113 || (/* ??? Assume floating point reg based on mode? */
8114 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8115 && !avoiding_indexed_address_p (mode))
8117 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8118 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8120 else if ((TARGET_ELF
8121 #if TARGET_MACHO
8122 || !MACHO_DYNAMIC_NO_PIC_P
8123 #endif
8125 && TARGET_32BIT
8126 && TARGET_NO_TOC
8127 && !flag_pic
8128 && !CONST_INT_P (x)
8129 && !CONST_WIDE_INT_P (x)
8130 && !CONST_DOUBLE_P (x)
8131 && CONSTANT_P (x)
8132 && GET_MODE_NUNITS (mode) == 1
8133 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8134 || (/* ??? Assume floating point reg based on mode? */
8135 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8137 rtx reg = gen_reg_rtx (Pmode);
8138 if (TARGET_ELF)
8139 emit_insn (gen_elf_high (reg, x));
8140 else
8141 emit_insn (gen_macho_high (reg, x));
8142 return gen_rtx_LO_SUM (Pmode, reg, x);
8144 else if (TARGET_TOC
8145 && SYMBOL_REF_P (x)
8146 && constant_pool_expr_p (x)
8147 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8148 return create_TOC_reference (x, NULL_RTX);
8149 else
8150 return x;
8153 /* Debug version of rs6000_legitimize_address. */
8154 static rtx
8155 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8157 rtx ret;
8158 rtx_insn *insns;
8160 start_sequence ();
8161 ret = rs6000_legitimize_address (x, oldx, mode);
8162 insns = get_insns ();
8163 end_sequence ();
8165 if (ret != x)
8167 fprintf (stderr,
8168 "\nrs6000_legitimize_address: mode %s, old code %s, "
8169 "new code %s, modified\n",
8170 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8171 GET_RTX_NAME (GET_CODE (ret)));
8173 fprintf (stderr, "Original address:\n");
8174 debug_rtx (x);
8176 fprintf (stderr, "oldx:\n");
8177 debug_rtx (oldx);
8179 fprintf (stderr, "New address:\n");
8180 debug_rtx (ret);
8182 if (insns)
8184 fprintf (stderr, "Insns added:\n");
8185 debug_rtx_list (insns, 20);
8188 else
8190 fprintf (stderr,
8191 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8192 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8194 debug_rtx (x);
8197 if (insns)
8198 emit_insn (insns);
8200 return ret;
8203 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8204 We need to emit DTP-relative relocations. */
8206 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8207 static void
8208 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8210 switch (size)
8212 case 4:
8213 fputs ("\t.long\t", file);
8214 break;
8215 case 8:
8216 fputs (DOUBLE_INT_ASM_OP, file);
8217 break;
8218 default:
8219 gcc_unreachable ();
8221 output_addr_const (file, x);
8222 if (TARGET_ELF)
8223 fputs ("@dtprel+0x8000", file);
8224 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8226 switch (SYMBOL_REF_TLS_MODEL (x))
8228 case 0:
8229 break;
8230 case TLS_MODEL_LOCAL_EXEC:
8231 fputs ("@le", file);
8232 break;
8233 case TLS_MODEL_INITIAL_EXEC:
8234 fputs ("@ie", file);
8235 break;
8236 case TLS_MODEL_GLOBAL_DYNAMIC:
8237 case TLS_MODEL_LOCAL_DYNAMIC:
8238 fputs ("@m", file);
8239 break;
8240 default:
8241 gcc_unreachable ();
8246 /* Return true if X is a symbol that refers to real (rather than emulated)
8247 TLS. */
8249 static bool
8250 rs6000_real_tls_symbol_ref_p (rtx x)
8252 return (SYMBOL_REF_P (x)
8253 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8256 /* In the name of slightly smaller debug output, and to cater to
8257 general assembler lossage, recognize various UNSPEC sequences
8258 and turn them back into a direct symbol reference. */
8260 static rtx
8261 rs6000_delegitimize_address (rtx orig_x)
8263 rtx x, y, offset;
8265 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8266 orig_x = XVECEXP (orig_x, 0, 0);
8268 orig_x = delegitimize_mem_from_attrs (orig_x);
8270 x = orig_x;
8271 if (MEM_P (x))
8272 x = XEXP (x, 0);
8274 y = x;
8275 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8276 y = XEXP (y, 1);
8278 offset = NULL_RTX;
8279 if (GET_CODE (y) == PLUS
8280 && GET_MODE (y) == Pmode
8281 && CONST_INT_P (XEXP (y, 1)))
8283 offset = XEXP (y, 1);
8284 y = XEXP (y, 0);
8287 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8289 y = XVECEXP (y, 0, 0);
8291 #ifdef HAVE_AS_TLS
8292 /* Do not associate thread-local symbols with the original
8293 constant pool symbol. */
8294 if (TARGET_XCOFF
8295 && SYMBOL_REF_P (y)
8296 && CONSTANT_POOL_ADDRESS_P (y)
8297 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8298 return orig_x;
8299 #endif
8301 if (offset != NULL_RTX)
8302 y = gen_rtx_PLUS (Pmode, y, offset);
8303 if (!MEM_P (orig_x))
8304 return y;
8305 else
8306 return replace_equiv_address_nv (orig_x, y);
8309 if (TARGET_MACHO
8310 && GET_CODE (orig_x) == LO_SUM
8311 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8313 y = XEXP (XEXP (orig_x, 1), 0);
8314 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8315 return XVECEXP (y, 0, 0);
8318 return orig_x;
8321 /* Return true if X shouldn't be emitted into the debug info.
8322 The linker doesn't like .toc section references from
8323 .debug_* sections, so reject .toc section symbols. */
8325 static bool
8326 rs6000_const_not_ok_for_debug_p (rtx x)
8328 if (GET_CODE (x) == UNSPEC)
8329 return true;
8330 if (SYMBOL_REF_P (x)
8331 && CONSTANT_POOL_ADDRESS_P (x))
8333 rtx c = get_pool_constant (x);
8334 machine_mode cmode = get_pool_mode (x);
8335 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8336 return true;
8339 return false;
8342 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8344 static bool
8345 rs6000_legitimate_combined_insn (rtx_insn *insn)
8347 int icode = INSN_CODE (insn);
8349 /* Reject creating doloop insns. Combine should not be allowed
8350 to create these for a number of reasons:
8351 1) In a nested loop, if combine creates one of these in an
8352 outer loop and the register allocator happens to allocate ctr
8353 to the outer loop insn, then the inner loop can't use ctr.
8354 Inner loops ought to be more highly optimized.
8355 2) Combine often wants to create one of these from what was
8356 originally a three insn sequence, first combining the three
8357 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8358 allocated ctr, the splitter takes us back to the three insn
8359 sequence. It's better to stop combine at the two insn
8360 sequence.
8361 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8362 insns, the register allocator sometimes uses floating point
8363 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8364 jump insn and output reloads are not implemented for jumps,
8365 the ctrsi/ctrdi splitters need to handle all possible cases.
8366 That's a pain, and it gets to be seriously difficult when a
8367 splitter that runs after reload needs memory to transfer from
8368 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8369 for the difficult case. It's better to not create problems
8370 in the first place. */
8371 if (icode != CODE_FOR_nothing
8372 && (icode == CODE_FOR_bdz_si
8373 || icode == CODE_FOR_bdz_di
8374 || icode == CODE_FOR_bdnz_si
8375 || icode == CODE_FOR_bdnz_di
8376 || icode == CODE_FOR_bdztf_si
8377 || icode == CODE_FOR_bdztf_di
8378 || icode == CODE_FOR_bdnztf_si
8379 || icode == CODE_FOR_bdnztf_di))
8380 return false;
8382 return true;
8385 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8387 static GTY(()) rtx rs6000_tls_symbol;
8388 static rtx
8389 rs6000_tls_get_addr (void)
8391 if (!rs6000_tls_symbol)
8392 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8394 return rs6000_tls_symbol;
8397 /* Construct the SYMBOL_REF for TLS GOT references. */
8399 static GTY(()) rtx rs6000_got_symbol;
8401 rs6000_got_sym (void)
8403 if (!rs6000_got_symbol)
8405 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8406 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8407 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8410 return rs6000_got_symbol;
8413 /* AIX Thread-Local Address support. */
8415 static rtx
8416 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8418 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8419 const char *name;
8420 char *tlsname;
8422 name = XSTR (addr, 0);
8423 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8424 or the symbol will be placed in the TLS private data section. */
8425 if (name[strlen (name) - 1] != ']'
8426 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8427 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8429 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8430 strcpy (tlsname, name);
8431 strcat (tlsname,
8432 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8433 tlsaddr = copy_rtx (addr);
8434 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8436 else
8437 tlsaddr = addr;
8439 /* Place addr into TOC constant pool. */
8440 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8442 /* Output the TOC entry and create the MEM referencing the value. */
8443 if (constant_pool_expr_p (XEXP (sym, 0))
8444 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8446 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8447 mem = gen_const_mem (Pmode, tocref);
8448 set_mem_alias_set (mem, get_TOC_alias_set ());
8450 else
8451 return sym;
8453 /* Use global-dynamic for local-dynamic. */
8454 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8455 || model == TLS_MODEL_LOCAL_DYNAMIC)
8457 /* Create new TOC reference for @m symbol. */
8458 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8459 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8460 strcpy (tlsname, "*LCM");
8461 strcat (tlsname, name + 3);
8462 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8463 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8464 tocref = create_TOC_reference (modaddr, NULL_RTX);
8465 rtx modmem = gen_const_mem (Pmode, tocref);
8466 set_mem_alias_set (modmem, get_TOC_alias_set ());
8468 rtx modreg = gen_reg_rtx (Pmode);
8469 emit_insn (gen_rtx_SET (modreg, modmem));
8471 tmpreg = gen_reg_rtx (Pmode);
8472 emit_insn (gen_rtx_SET (tmpreg, mem));
8474 dest = gen_reg_rtx (Pmode);
8475 if (TARGET_32BIT)
8476 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8477 else
8478 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8479 return dest;
8481 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8482 else if (TARGET_32BIT)
8484 tlsreg = gen_reg_rtx (SImode);
8485 emit_insn (gen_tls_get_tpointer (tlsreg));
8487 else
8488 tlsreg = gen_rtx_REG (DImode, 13);
8490 /* Load the TOC value into temporary register. */
8491 tmpreg = gen_reg_rtx (Pmode);
8492 emit_insn (gen_rtx_SET (tmpreg, mem));
8493 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8494 gen_rtx_MINUS (Pmode, addr, tlsreg));
8496 /* Add TOC symbol value to TLS pointer. */
8497 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8499 return dest;
8502 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8503 __tls_get_addr call. */
8505 void
8506 rs6000_output_tlsargs (rtx *operands)
8508 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8509 rtx op[3];
8511 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8512 op[0] = operands[0];
8513 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8514 op[1] = XVECEXP (operands[2], 0, 0);
8515 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8517 /* The GOT register. */
8518 op[2] = XVECEXP (operands[2], 0, 1);
8519 if (TARGET_CMODEL != CMODEL_SMALL)
8520 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8521 "addi %0,%0,%1@got@tlsgd@l", op);
8522 else
8523 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8525 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8527 if (TARGET_CMODEL != CMODEL_SMALL)
8528 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8529 "addi %0,%0,%&@got@tlsld@l", op);
8530 else
8531 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8533 else
8534 gcc_unreachable ();
8537 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8538 emit_library_call_value calls in rs6000_legitimize_tls_address
8539 through to rs6000_call_aix and rs6000_call_sysv, which use it to
8540 emit the marker relocs placed on __tls_get_addr calls. */
8541 static rtx global_tlsarg;
8543 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8544 this (thread-local) address. */
8546 static rtx
8547 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8549 rtx dest, insn;
8551 if (TARGET_XCOFF)
8552 return rs6000_legitimize_tls_address_aix (addr, model);
8554 dest = gen_reg_rtx (Pmode);
8555 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8557 rtx tlsreg;
8559 if (TARGET_64BIT)
8561 tlsreg = gen_rtx_REG (Pmode, 13);
8562 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8564 else
8566 tlsreg = gen_rtx_REG (Pmode, 2);
8567 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8569 emit_insn (insn);
8571 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8573 rtx tlsreg, tmp;
8575 tmp = gen_reg_rtx (Pmode);
8576 if (TARGET_64BIT)
8578 tlsreg = gen_rtx_REG (Pmode, 13);
8579 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8581 else
8583 tlsreg = gen_rtx_REG (Pmode, 2);
8584 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8586 emit_insn (insn);
8587 if (TARGET_64BIT)
8588 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8589 else
8590 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8591 emit_insn (insn);
8593 else
8595 rtx got, tga, tmp1, tmp2;
8597 /* We currently use relocations like @got@tlsgd for TLS, which
8598 means the linker will handle allocation of TLS entries, placing
8599 them in the .got section. So use a pointer to the .got section,
8600 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8601 or to secondary GOT sections used by 32-bit -fPIC. */
8602 if (TARGET_64BIT)
8603 got = gen_rtx_REG (Pmode, 2);
8604 else
8606 if (flag_pic == 1)
8607 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8608 else
8610 rtx gsym = rs6000_got_sym ();
8611 got = gen_reg_rtx (Pmode);
8612 if (flag_pic == 0)
8613 rs6000_emit_move (got, gsym, Pmode);
8614 else
8616 rtx mem, lab;
8618 tmp1 = gen_reg_rtx (Pmode);
8619 tmp2 = gen_reg_rtx (Pmode);
8620 mem = gen_const_mem (Pmode, tmp1);
8621 lab = gen_label_rtx ();
8622 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8623 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8624 if (TARGET_LINK_STACK)
8625 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8626 emit_move_insn (tmp2, mem);
8627 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8628 set_unique_reg_note (last, REG_EQUAL, gsym);
8633 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8635 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8636 UNSPEC_TLSGD);
8637 tga = rs6000_tls_get_addr ();
8638 global_tlsarg = arg;
8639 if (TARGET_TLS_MARKERS)
8641 rtx argreg = gen_rtx_REG (Pmode, 3);
8642 emit_insn (gen_rtx_SET (argreg, arg));
8643 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8644 argreg, Pmode);
8646 else
8647 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8648 global_tlsarg = NULL_RTX;
8650 /* Make a note so that the result of this call can be CSEd. */
8651 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8652 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8653 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8655 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8657 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8658 tga = rs6000_tls_get_addr ();
8659 tmp1 = gen_reg_rtx (Pmode);
8660 global_tlsarg = arg;
8661 if (TARGET_TLS_MARKERS)
8663 rtx argreg = gen_rtx_REG (Pmode, 3);
8664 emit_insn (gen_rtx_SET (argreg, arg));
8665 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8666 argreg, Pmode);
8668 else
8669 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8670 global_tlsarg = NULL_RTX;
8672 /* Make a note so that the result of this call can be CSEd. */
8673 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8674 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8675 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8677 if (rs6000_tls_size == 16)
8679 if (TARGET_64BIT)
8680 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8681 else
8682 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8684 else if (rs6000_tls_size == 32)
8686 tmp2 = gen_reg_rtx (Pmode);
8687 if (TARGET_64BIT)
8688 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8689 else
8690 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8691 emit_insn (insn);
8692 if (TARGET_64BIT)
8693 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8694 else
8695 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8697 else
8699 tmp2 = gen_reg_rtx (Pmode);
8700 if (TARGET_64BIT)
8701 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8702 else
8703 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8704 emit_insn (insn);
8705 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8707 emit_insn (insn);
8709 else
8711 /* IE, or 64-bit offset LE. */
8712 tmp2 = gen_reg_rtx (Pmode);
8713 if (TARGET_64BIT)
8714 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8715 else
8716 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8717 emit_insn (insn);
8718 if (TARGET_64BIT)
8719 insn = gen_tls_tls_64 (dest, tmp2, addr);
8720 else
8721 insn = gen_tls_tls_32 (dest, tmp2, addr);
8722 emit_insn (insn);
8726 return dest;
8729 /* Only create the global variable for the stack protect guard if we are using
8730 the global flavor of that guard. */
8731 static tree
8732 rs6000_init_stack_protect_guard (void)
8734 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8735 return default_stack_protect_guard ();
8737 return NULL_TREE;
8740 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8742 static bool
8743 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8745 if (GET_CODE (x) == HIGH
8746 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8747 return true;
8749 /* A TLS symbol in the TOC cannot contain a sum. */
8750 if (GET_CODE (x) == CONST
8751 && GET_CODE (XEXP (x, 0)) == PLUS
8752 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8753 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8754 return true;
8756 /* Do not place an ELF TLS symbol in the constant pool. */
8757 return TARGET_ELF && tls_referenced_p (x);
8760 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8761 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8762 can be addressed relative to the toc pointer. */
8764 static bool
8765 use_toc_relative_ref (rtx sym, machine_mode mode)
8767 return ((constant_pool_expr_p (sym)
8768 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8769 get_pool_mode (sym)))
8770 || (TARGET_CMODEL == CMODEL_MEDIUM
8771 && SYMBOL_REF_LOCAL_P (sym)
8772 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8775 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8776 that is a valid memory address for an instruction.
8777 The MODE argument is the machine mode for the MEM expression
8778 that wants to use this address.
8780 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
8781 refers to a constant pool entry of an address (or the sum of it
8782 plus a constant), a short (16-bit signed) constant plus a register,
8783 the sum of two registers, or a register indirect, possibly with an
8784 auto-increment. For DFmode, DDmode and DImode with a constant plus
8785 register, we must ensure that both words are addressable or PowerPC64
8786 with offset word aligned.
8788 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8789 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8790 because adjacent memory cells are accessed by adding word-sized offsets
8791 during assembly output. */
8792 static bool
8793 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8795 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8796 bool quad_offset_p = mode_supports_dq_form (mode);
8798 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8799 if (VECTOR_MEM_ALTIVEC_P (mode)
8800 && GET_CODE (x) == AND
8801 && CONST_INT_P (XEXP (x, 1))
8802 && INTVAL (XEXP (x, 1)) == -16)
8803 x = XEXP (x, 0);
8805 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8806 return 0;
8807 if (legitimate_indirect_address_p (x, reg_ok_strict))
8808 return 1;
8809 if (TARGET_UPDATE
8810 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8811 && mode_supports_pre_incdec_p (mode)
8812 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8813 return 1;
8814 /* Handle restricted vector d-form offsets in ISA 3.0. */
8815 if (quad_offset_p)
8817 if (quad_address_p (x, mode, reg_ok_strict))
8818 return 1;
8820 else if (virtual_stack_registers_memory_p (x))
8821 return 1;
8823 else if (reg_offset_p)
8825 if (legitimate_small_data_p (mode, x))
8826 return 1;
8827 if (legitimate_constant_pool_address_p (x, mode,
8828 reg_ok_strict || lra_in_progress))
8829 return 1;
8832 /* For TImode, if we have TImode in VSX registers, only allow register
8833 indirect addresses. This will allow the values to go in either GPRs
8834 or VSX registers without reloading. The vector types would tend to
8835 go into VSX registers, so we allow REG+REG, while TImode seems
8836 somewhat split, in that some uses are GPR based, and some VSX based. */
8837 /* FIXME: We could loosen this by changing the following to
8838 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8839 but currently we cannot allow REG+REG addressing for TImode. See
8840 PR72827 for complete details on how this ends up hoodwinking DSE. */
8841 if (mode == TImode && TARGET_VSX)
8842 return 0;
8843 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8844 if (! reg_ok_strict
8845 && reg_offset_p
8846 && GET_CODE (x) == PLUS
8847 && REG_P (XEXP (x, 0))
8848 && (XEXP (x, 0) == virtual_stack_vars_rtx
8849 || XEXP (x, 0) == arg_pointer_rtx)
8850 && CONST_INT_P (XEXP (x, 1)))
8851 return 1;
8852 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8853 return 1;
8854 if (!FLOAT128_2REG_P (mode)
8855 && (TARGET_HARD_FLOAT
8856 || TARGET_POWERPC64
8857 || (mode != DFmode && mode != DDmode))
8858 && (TARGET_POWERPC64 || mode != DImode)
8859 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8860 && mode != PTImode
8861 && !avoiding_indexed_address_p (mode)
8862 && legitimate_indexed_address_p (x, reg_ok_strict))
8863 return 1;
8864 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8865 && mode_supports_pre_modify_p (mode)
8866 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8867 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8868 reg_ok_strict, false)
8869 || (!avoiding_indexed_address_p (mode)
8870 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8871 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8872 return 1;
8873 if (reg_offset_p && !quad_offset_p
8874 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8875 return 1;
8876 return 0;
8879 /* Debug version of rs6000_legitimate_address_p. */
8880 static bool
8881 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8882 bool reg_ok_strict)
8884 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8885 fprintf (stderr,
8886 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8887 "strict = %d, reload = %s, code = %s\n",
8888 ret ? "true" : "false",
8889 GET_MODE_NAME (mode),
8890 reg_ok_strict,
8891 (reload_completed ? "after" : "before"),
8892 GET_RTX_NAME (GET_CODE (x)));
8893 debug_rtx (x);
8895 return ret;
8898 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8900 static bool
8901 rs6000_mode_dependent_address_p (const_rtx addr,
8902 addr_space_t as ATTRIBUTE_UNUSED)
8904 return rs6000_mode_dependent_address_ptr (addr);
8907 /* Go to LABEL if ADDR (a legitimate address expression)
8908 has an effect that depends on the machine mode it is used for.
8910 On the RS/6000 this is true of all integral offsets (since AltiVec
8911 and VSX modes don't allow them) and of any pre-increment or decrement.
8913 ??? Except that due to conceptual problems in offsettable_address_p
8914 we can't really report the problems of integral offsets. So leave
8915 this assuming that the adjustable offset must be valid for the
8916 sub-words of a TFmode operand, which is what we had before. */
8918 static bool
8919 rs6000_mode_dependent_address (const_rtx addr)
8921 switch (GET_CODE (addr))
8923 case PLUS:
8924 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8925 is considered a legitimate address before reload, so there
8926 are no offset restrictions in that case. Note that this
8927 condition is safe in strict mode because any address involving
8928 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8929 been rejected as illegitimate. */
8930 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8931 && XEXP (addr, 0) != arg_pointer_rtx
8932 && CONST_INT_P (XEXP (addr, 1)))
8934 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8935 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8937 break;
8939 case LO_SUM:
8940 /* Anything in the constant pool is sufficiently aligned that
8941 all bytes have the same high part address. */
8942 return !legitimate_constant_pool_address_p (addr, QImode, false);
8944 /* Auto-increment cases are now treated generically in recog.c. */
8945 case PRE_MODIFY:
8946 return TARGET_UPDATE;
8948 /* AND is only allowed in Altivec loads. */
8949 case AND:
8950 return true;
8952 default:
8953 break;
8956 return false;
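/* For example, on powerpc64 the address (plus (reg) (const_int 32760))
   is mode dependent: 32760 + 0x8000 == 0xfff8 >= 0x10000 - 8, so the
   sub-word offsets of a multi-word access starting there could exceed
   the 16-bit displacement range, while (const_int 32752) is still
   safe.  */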
8959 /* Debug version of rs6000_mode_dependent_address. */
8960 static bool
8961 rs6000_debug_mode_dependent_address (const_rtx addr)
8963 bool ret = rs6000_mode_dependent_address (addr);
8965 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
8966 ret ? "true" : "false");
8967 debug_rtx (addr);
8969 return ret;
8972 /* Implement FIND_BASE_TERM. */
8975 rs6000_find_base_term (rtx op)
8977 rtx base;
8979 base = op;
8980 if (GET_CODE (base) == CONST)
8981 base = XEXP (base, 0);
8982 if (GET_CODE (base) == PLUS)
8983 base = XEXP (base, 0);
8984 if (GET_CODE (base) == UNSPEC)
8985 switch (XINT (base, 1))
8987 case UNSPEC_TOCREL:
8988 case UNSPEC_MACHOPIC_OFFSET:
8989 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
8990 for aliasing purposes. */
8991 return XVECEXP (base, 0, 0);
8994 return op;
8997 /* More elaborate version of recog's offsettable_memref_p predicate
8998 that works around the ??? note of rs6000_mode_dependent_address.
8999 In particular it accepts
9001 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9003 in 32-bit mode, which the recog predicate rejects. */
9005 static bool
9006 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9008 bool worst_case;
9010 if (!MEM_P (op))
9011 return false;
9013 /* First mimic offsettable_memref_p. */
9014 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9015 return true;
9017 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9018 the latter predicate knows nothing about the mode of the memory
9019 reference and, therefore, assumes that it is the largest supported
9020 mode (TFmode). As a consequence, legitimate offsettable memory
9021 references are rejected. rs6000_legitimate_offset_address_p contains
9022 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9023 at least with a little bit of help here given that we know the
9024 actual registers used. */
9025 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9026 || GET_MODE_SIZE (reg_mode) == 4);
9027 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9028 strict, worst_case);
9031 /* Determine the reassociation width to be used in reassociate_bb.
9032 This takes into account how many parallel operations we
9033 can actually do of a given type, and also the latency.
9035 int add/sub 6/cycle
9036 mul 2/cycle
9037 vect add/sub/mul 2/cycle
9038 fp add/sub/mul 2/cycle
9039 dfp 1/cycle
9042 static int
9043 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9044 machine_mode mode)
9046 switch (rs6000_tune)
9048 case PROCESSOR_POWER8:
9049 case PROCESSOR_POWER9:
9050 case PROCESSOR_FUTURE:
9051 if (DECIMAL_FLOAT_MODE_P (mode))
9052 return 1;
9053 if (VECTOR_MODE_P (mode))
9054 return 4;
9055 if (INTEGRAL_MODE_P (mode))
9056 return 1;
9057 if (FLOAT_MODE_P (mode))
9058 return 4;
9059 break;
9060 default:
9061 break;
9063 return 1;
9066 /* Change register usage conditional on target flags. */
9067 static void
9068 rs6000_conditional_register_usage (void)
9070 int i;
9072 if (TARGET_DEBUG_TARGET)
9073 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9075 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9076 if (TARGET_64BIT)
9077 fixed_regs[13] = call_used_regs[13]
9078 = call_really_used_regs[13] = 1;
9080 /* Conditionally disable FPRs. */
9081 if (TARGET_SOFT_FLOAT)
9082 for (i = 32; i < 64; i++)
9083 fixed_regs[i] = call_used_regs[i]
9084 = call_really_used_regs[i] = 1;
9086 /* The TOC register is not killed across calls in a way that is
9087 visible to the compiler. */
9088 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9089 call_really_used_regs[2] = 0;
9091 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9092 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9094 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9095 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9096 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9097 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9099 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9100 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9101 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9102 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9104 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9105 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9106 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9108 if (!TARGET_ALTIVEC && !TARGET_VSX)
9110 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9111 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9112 call_really_used_regs[VRSAVE_REGNO] = 1;
9115 if (TARGET_ALTIVEC || TARGET_VSX)
9116 global_regs[VSCR_REGNO] = 1;
9118 if (TARGET_ALTIVEC_ABI)
9120 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9121 call_used_regs[i] = call_really_used_regs[i] = 1;
9123 /* AIX reserves VR20:31 in non-extended ABI mode. */
9124 if (TARGET_XCOFF)
9125 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9126 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9131 /* Output insns to set DEST equal to the constant SOURCE as a series of
9132 lis, ori and shl instructions and return TRUE. */
9134 bool
9135 rs6000_emit_set_const (rtx dest, rtx source)
9137 machine_mode mode = GET_MODE (dest);
9138 rtx temp, set;
9139 rtx_insn *insn;
9140 HOST_WIDE_INT c;
9142 gcc_checking_assert (CONST_INT_P (source));
9143 c = INTVAL (source);
9144 switch (mode)
9146 case E_QImode:
9147 case E_HImode:
9148 emit_insn (gen_rtx_SET (dest, source));
9149 return true;
9151 case E_SImode:
9152 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9154 emit_insn (gen_rtx_SET (copy_rtx (temp),
9155 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9156 emit_insn (gen_rtx_SET (dest,
9157 gen_rtx_IOR (SImode, copy_rtx (temp),
9158 GEN_INT (c & 0xffff))));
9159 break;
9161 case E_DImode:
9162 if (!TARGET_POWERPC64)
9164 rtx hi, lo;
9166 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9167 DImode);
9168 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9169 DImode);
9170 emit_move_insn (hi, GEN_INT (c >> 32));
9171 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9172 emit_move_insn (lo, GEN_INT (c));
9174 else
9175 rs6000_emit_set_long_const (dest, c);
9176 break;
9178 default:
9179 gcc_unreachable ();
9182 insn = get_last_insn ();
9183 set = single_set (insn);
9184 if (! CONSTANT_P (SET_SRC (set)))
9185 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9187 return true;
9190 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9191 Output insns to set DEST equal to the constant C as a series of
9192 lis, ori and shl instructions. */
9194 static void
9195 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9197 rtx temp;
9198 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9200 ud1 = c & 0xffff;
9201 c = c >> 16;
9202 ud2 = c & 0xffff;
9203 c = c >> 16;
9204 ud3 = c & 0xffff;
9205 c = c >> 16;
9206 ud4 = c & 0xffff;
9208 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9209 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9210 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9212 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9213 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9215 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9217 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9218 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9219 if (ud1 != 0)
9220 emit_move_insn (dest,
9221 gen_rtx_IOR (DImode, copy_rtx (temp),
9222 GEN_INT (ud1)));
9224 else if (ud3 == 0 && ud4 == 0)
9226 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9228 gcc_assert (ud2 & 0x8000);
9229 emit_move_insn (copy_rtx (temp),
9230 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9231 if (ud1 != 0)
9232 emit_move_insn (copy_rtx (temp),
9233 gen_rtx_IOR (DImode, copy_rtx (temp),
9234 GEN_INT (ud1)));
9235 emit_move_insn (dest,
9236 gen_rtx_ZERO_EXTEND (DImode,
9237 gen_lowpart (SImode,
9238 copy_rtx (temp))));
9240 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9241 || (ud4 == 0 && ! (ud3 & 0x8000)))
9243 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9245 emit_move_insn (copy_rtx (temp),
9246 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9247 if (ud2 != 0)
9248 emit_move_insn (copy_rtx (temp),
9249 gen_rtx_IOR (DImode, copy_rtx (temp),
9250 GEN_INT (ud2)));
9251 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9252 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9253 GEN_INT (16)));
9254 if (ud1 != 0)
9255 emit_move_insn (dest,
9256 gen_rtx_IOR (DImode, copy_rtx (temp),
9257 GEN_INT (ud1)));
9259 else
9261 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9263 emit_move_insn (copy_rtx (temp),
9264 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9265 if (ud3 != 0)
9266 emit_move_insn (copy_rtx (temp),
9267 gen_rtx_IOR (DImode, copy_rtx (temp),
9268 GEN_INT (ud3)));
9270 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9271 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9272 GEN_INT (32)));
9273 if (ud2 != 0)
9274 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9275 gen_rtx_IOR (DImode, copy_rtx (temp),
9276 GEN_INT (ud2 << 16)));
9277 if (ud1 != 0)
9278 emit_move_insn (dest,
9279 gen_rtx_IOR (DImode, copy_rtx (temp),
9280 GEN_INT (ud1)));
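/* A minimal standalone sketch (not part of GCC; assumes only a C99
   compiler and hosted stdio): how the routine above splits a 64-bit
   constant into the four 16-bit chunks ud1..ud4, how the
   ((x ^ 0x8000...) - 0x8000...) idiom sign-extends a chunk the way lis
   does, and that a lis/ori/sldi-style rebuild recovers the constant.  */

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  uint64_t u = 0x123456789abcdef0ULL;

  /* The same 16-bit chunking as rs6000_emit_set_long_const.  */
  uint64_t ud1 = u & 0xffff;
  uint64_t ud2 = (u >> 16) & 0xffff;
  uint64_t ud3 = (u >> 32) & 0xffff;
  uint64_t ud4 = (u >> 48) & 0xffff;

  /* Sign-extend the chunk shifted into bits 16..31, as lis would.  */
  int64_t hi = (int64_t) (((ud4 << 16) ^ 0x80000000u) - 0x80000000u);

  /* Rebuild as the emitted sequence does: form the top 32 bits,
     shift left 32, then OR in the lower halfwords.  */
  uint64_t t = ((uint64_t) (hi | (int64_t) ud3) << 32)
               | (ud2 << 16) | ud1;
  assert (t == u);
  printf ("%016llx rebuilt from %04llx:%04llx:%04llx:%04llx\n",
          (unsigned long long) t,
          (unsigned long long) ud4, (unsigned long long) ud3,
          (unsigned long long) ud2, (unsigned long long) ud1);
  return 0;
}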
9284 /* Helper for the following. Get rid of [r+r] memory refs
9285 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9287 static void
9288 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9290 if (MEM_P (operands[0])
9291 && !REG_P (XEXP (operands[0], 0))
9292 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9293 GET_MODE (operands[0]), false))
9294 operands[0]
9295 = replace_equiv_address (operands[0],
9296 copy_addr_to_reg (XEXP (operands[0], 0)));
9298 if (MEM_P (operands[1])
9299 && !REG_P (XEXP (operands[1], 0))
9300 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9301 GET_MODE (operands[1]), false))
9302 operands[1]
9303 = replace_equiv_address (operands[1],
9304 copy_addr_to_reg (XEXP (operands[1], 0)));
9307 /* Generate a vector of constants to permute MODE for a little-endian
9308 storage operation by swapping the two halves of a vector. */
9309 static rtvec
9310 rs6000_const_vec (machine_mode mode)
9312 int i, subparts;
9313 rtvec v;
9315 switch (mode)
9317 case E_V1TImode:
9318 subparts = 1;
9319 break;
9320 case E_V2DFmode:
9321 case E_V2DImode:
9322 subparts = 2;
9323 break;
9324 case E_V4SFmode:
9325 case E_V4SImode:
9326 subparts = 4;
9327 break;
9328 case E_V8HImode:
9329 subparts = 8;
9330 break;
9331 case E_V16QImode:
9332 subparts = 16;
9333 break;
9334 default:
9335 gcc_unreachable();
9338 v = rtvec_alloc (subparts);
9340 for (i = 0; i < subparts / 2; ++i)
9341 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9342 for (i = subparts / 2; i < subparts; ++i)
9343 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9345 return v;
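/* A small standalone sketch (not part of GCC): the permutation that
   rs6000_const_vec builds simply swaps the two halves of the vector.
   For subparts == 4 (V4SImode) it yields { 2, 3, 0, 1 }.  */

#include <stdio.h>

int
main (void)
{
  int subparts = 4;            /* e.g. V4SImode */
  int perm[16];

  for (int i = 0; i < subparts / 2; ++i)
    perm[i] = i + subparts / 2;
  for (int i = subparts / 2; i < subparts; ++i)
    perm[i] = i - subparts / 2;

  for (int i = 0; i < subparts; ++i)
    printf ("%d ", perm[i]);   /* prints: 2 3 0 1 */
  printf ("\n");
  return 0;
}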
9348 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9349 store operation. */
9350 void
9351 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9353 /* Scalar permutations are easier to express in integer modes than in
9354 floating-point modes, so cast them here. We use V1TImode instead
9355 of TImode to ensure that the values don't go through GPRs. */
9356 if (FLOAT128_VECTOR_P (mode))
9358 dest = gen_lowpart (V1TImode, dest);
9359 source = gen_lowpart (V1TImode, source);
9360 mode = V1TImode;
9363 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9364 scalar. */
9365 if (mode == TImode || mode == V1TImode)
9366 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9367 GEN_INT (64))));
9368 else
9370 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9371 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9375 /* Emit a little-endian load from vector memory location SOURCE to VSX
9376 register DEST in mode MODE. The load is done with two permuting
9377 insns that represent an lxvd2x and an xxpermdi. */
9378 void
9379 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9381 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9382 V1TImode). */
9383 if (mode == TImode || mode == V1TImode)
9385 mode = V2DImode;
9386 dest = gen_lowpart (V2DImode, dest);
9387 source = adjust_address (source, V2DImode, 0);
9390 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9391 rs6000_emit_le_vsx_permute (tmp, source, mode);
9392 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9395 /* Emit a little-endian store to vector memory location DEST from VSX
9396 register SOURCE in mode MODE. The store is done with two permuting
9397 insns that represent an xxpermdi and an stxvd2x. */
9398 void
9399 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9401 /* This should never be called during or after LRA, because it does
9402 not re-permute the source register. It is intended only for use
9403 during expand. */
9404 gcc_assert (!lra_in_progress && !reload_completed);
9406 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9407 V1TImode). */
9408 if (mode == TImode || mode == V1TImode)
9410 mode = V2DImode;
9411 dest = adjust_address (dest, V2DImode, 0);
9412 source = gen_lowpart (V2DImode, source);
9415 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9416 rs6000_emit_le_vsx_permute (tmp, source, mode);
9417 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9420 /* Emit a sequence representing a little-endian VSX load or store,
9421 moving data from SOURCE to DEST in mode MODE. This is done
9422 separately from rs6000_emit_move to ensure it is called only
9423 during expand. LE VSX loads and stores introduced later are
9424 handled with a split. The expand-time RTL generation allows
9425 us to optimize away redundant pairs of register-permutes. */
9426 void
9427 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9429 gcc_assert (!BYTES_BIG_ENDIAN
9430 && VECTOR_MEM_VSX_P (mode)
9431 && !TARGET_P9_VECTOR
9432 && !gpr_or_gpr_p (dest, source)
9433 && (MEM_P (source) ^ MEM_P (dest)));
9435 if (MEM_P (source))
9437 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9438 rs6000_emit_le_vsx_load (dest, source, mode);
9440 else
9442 if (!REG_P (source))
9443 source = force_reg (mode, source);
9444 rs6000_emit_le_vsx_store (dest, source, mode);
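/* A standalone sketch (not part of GCC) of why the two permutes above
   are correct: on little-endian, lxvd2x/stxvd2x access the vector with
   its two doublewords swapped, and the companion xxpermdi swaps them
   back.  The half-swap is its own inverse, which is also what lets the
   expand-time form cancel redundant permute pairs.  */

#include <stdio.h>
#include <string.h>

static void
half_swap (int v[4])
{
  /* Swap doublewords: elements {0,1} with {2,3} of a 4 x 32-bit vector. */
  int t[4] = { v[2], v[3], v[0], v[1] };
  memcpy (v, t, sizeof t);
}

int
main (void)
{
  int v[4] = { 10, 20, 30, 40 };
  half_swap (v);               /* the swapped access the hardware does */
  half_swap (v);               /* the xxpermdi that undoes it */
  for (int i = 0; i < 4; ++i)
    printf ("%d ", v[i]);      /* prints: 10 20 30 40 */
  printf ("\n");
  return 0;
}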
9448 /* Return whether an SFmode or SImode move can be done without converting one
9449 mode to another. This arises when we have:
9451 (SUBREG:SF (REG:SI ...))
9452 (SUBREG:SI (REG:SF ...))
9454 and one of the values is in a floating point/vector register, where SFmode
9455 scalars are stored in DFmode format. */
9457 bool
9458 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9460 if (TARGET_ALLOW_SF_SUBREG)
9461 return true;
9463 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9464 return true;
9466 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9467 return true;
9469 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9470 if (SUBREG_P (dest))
9472 rtx dest_subreg = SUBREG_REG (dest);
9473 rtx src_subreg = SUBREG_REG (src);
9474 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9477 return false;
9481 /* Helper function to change moves with:
9483 (SUBREG:SF (REG:SI)) and
9484 (SUBREG:SI (REG:SF))
9486 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9487 values are stored as DFmode values in the VSX registers. We need to convert
9488 the bits before we can use a direct move or operate on the bits in the
9489 vector register as an integer type.
9491 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9493 static bool
9494 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9496 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9497 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9498 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9500 rtx inner_source = SUBREG_REG (source);
9501 machine_mode inner_mode = GET_MODE (inner_source);
9503 if (mode == SImode && inner_mode == SFmode)
9505 emit_insn (gen_movsi_from_sf (dest, inner_source));
9506 return true;
9509 if (mode == SFmode && inner_mode == SImode)
9511 emit_insn (gen_movsf_from_si (dest, inner_source));
9512 return true;
9516 return false;
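/* A standalone sketch (not part of GCC) of why these subreg moves need
   real conversion insns: in VSX registers an SFmode scalar is held in
   DFmode format, so the 32-bit pattern of 1.0f is not what the register
   holds.  This just prints the two encodings for comparison.  */

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  float f = 1.0f;
  double d = (double) f;       /* the in-register DFmode image */
  uint32_t fbits;
  uint64_t dbits;

  memcpy (&fbits, &f, sizeof fbits);
  memcpy (&dbits, &d, sizeof dbits);
  printf ("SF bits: %08x\n", fbits);                        /* 3f800000 */
  printf ("DF bits: %016llx\n", (unsigned long long) dbits); /* 3ff0... */
  return 0;
}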
9519 /* Emit a move from SOURCE to DEST in mode MODE. */
9520 void
9521 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9523 rtx operands[2];
9524 operands[0] = dest;
9525 operands[1] = source;
9527 if (TARGET_DEBUG_ADDR)
9529 fprintf (stderr,
9530 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9531 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9532 GET_MODE_NAME (mode),
9533 lra_in_progress,
9534 reload_completed,
9535 can_create_pseudo_p ());
9536 debug_rtx (dest);
9537 fprintf (stderr, "source:\n");
9538 debug_rtx (source);
9541 /* Check that we get CONST_WIDE_INT only when we should. */
9542 if (CONST_WIDE_INT_P (operands[1])
9543 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9544 gcc_unreachable ();
9546 #ifdef HAVE_AS_GNU_ATTRIBUTE
9547 /* If we use a long double type, set the flags in .gnu_attribute that say
9548 what the long double type is. This is to allow the linker's warning
9549 message for the wrong long double to be useful, even if the function does
9550 not do a call (for example, doing a 128-bit add on power9 if the long
9551 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128 are
9552 used and they aren't the default long double type. */
9553 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9555 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9556 rs6000_passes_float = rs6000_passes_long_double = true;
9558 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9559 rs6000_passes_float = rs6000_passes_long_double = true;
9561 #endif
9563 /* See if we need to special case SImode/SFmode SUBREG moves. */
9564 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9565 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9566 return;
9568 /* Check if GCC is setting up a block move that will end up using FP
9569 registers as temporaries. We must make sure this is acceptable. */
9570 if (MEM_P (operands[0])
9571 && MEM_P (operands[1])
9572 && mode == DImode
9573 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9574 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9575 && ! (rs6000_slow_unaligned_access (SImode,
9576 (MEM_ALIGN (operands[0]) > 32
9577 ? 32 : MEM_ALIGN (operands[0])))
9578 || rs6000_slow_unaligned_access (SImode,
9579 (MEM_ALIGN (operands[1]) > 32
9580 ? 32 : MEM_ALIGN (operands[1]))))
9581 && ! MEM_VOLATILE_P (operands [0])
9582 && ! MEM_VOLATILE_P (operands [1]))
9584 emit_move_insn (adjust_address (operands[0], SImode, 0),
9585 adjust_address (operands[1], SImode, 0));
9586 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9587 adjust_address (copy_rtx (operands[1]), SImode, 4));
9588 return;
9591 if (can_create_pseudo_p () && MEM_P (operands[0])
9592 && !gpc_reg_operand (operands[1], mode))
9593 operands[1] = force_reg (mode, operands[1]);
9595 /* Recognize the case where operand[1] is a reference to thread-local
9596 data and load its address to a register. */
9597 if (tls_referenced_p (operands[1]))
9599 enum tls_model model;
9600 rtx tmp = operands[1];
9601 rtx addend = NULL;
9603 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9605 addend = XEXP (XEXP (tmp, 0), 1);
9606 tmp = XEXP (XEXP (tmp, 0), 0);
9609 gcc_assert (SYMBOL_REF_P (tmp));
9610 model = SYMBOL_REF_TLS_MODEL (tmp);
9611 gcc_assert (model != 0);
9613 tmp = rs6000_legitimize_tls_address (tmp, model);
9614 if (addend)
9616 tmp = gen_rtx_PLUS (mode, tmp, addend);
9617 tmp = force_operand (tmp, operands[0]);
9619 operands[1] = tmp;
9622 /* 128-bit constant floating-point values on Darwin should really be loaded
9623 as two parts. However, this premature splitting is a problem when DFmode
9624 values can go into Altivec registers. */
9625 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9626 && !reg_addr[DFmode].scalar_in_vmx_p)
9628 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9629 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9630 DFmode);
9631 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9632 GET_MODE_SIZE (DFmode)),
9633 simplify_gen_subreg (DFmode, operands[1], mode,
9634 GET_MODE_SIZE (DFmode)),
9635 DFmode);
9636 return;
9639 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9640 p1:SD) if p1 is not of floating point class and p0 is spilled, as
9641 we have no analogous movsd_store for this. */
9642 if (lra_in_progress && mode == DDmode
9643 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9644 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9645 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9646 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9648 enum reg_class cl;
9649 int regno = REGNO (SUBREG_REG (operands[1]));
9651 if (!HARD_REGISTER_NUM_P (regno))
9653 cl = reg_preferred_class (regno);
9654 regno = reg_renumber[regno];
9655 if (regno < 0)
9656 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9658 if (regno >= 0 && ! FP_REGNO_P (regno))
9660 mode = SDmode;
9661 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9662 operands[1] = SUBREG_REG (operands[1]);
9665 if (lra_in_progress
9666 && mode == SDmode
9667 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9668 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9669 && (REG_P (operands[1])
9670 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9672 int regno = reg_or_subregno (operands[1]);
9673 enum reg_class cl;
9675 if (!HARD_REGISTER_NUM_P (regno))
9677 cl = reg_preferred_class (regno);
9678 gcc_assert (cl != NO_REGS);
9679 regno = reg_renumber[regno];
9680 if (regno < 0)
9681 regno = ira_class_hard_regs[cl][0];
9683 if (FP_REGNO_P (regno))
9685 if (GET_MODE (operands[0]) != DDmode)
9686 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9687 emit_insn (gen_movsd_store (operands[0], operands[1]));
9689 else if (INT_REGNO_P (regno))
9690 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9691 else
9692 gcc_unreachable();
9693 return;
9695 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9696 p1:DD)) if p0 is not of floating point class and p1 is spilled, as
9697 we have no analogous movsd_load for this. */
9698 if (lra_in_progress && mode == DDmode
9699 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9700 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9701 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9702 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9704 enum reg_class cl;
9705 int regno = REGNO (SUBREG_REG (operands[0]));
9707 if (!HARD_REGISTER_NUM_P (regno))
9709 cl = reg_preferred_class (regno);
9710 regno = reg_renumber[regno];
9711 if (regno < 0)
9712 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9714 if (regno >= 0 && ! FP_REGNO_P (regno))
9716 mode = SDmode;
9717 operands[0] = SUBREG_REG (operands[0]);
9718 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9721 if (lra_in_progress
9722 && mode == SDmode
9723 && (REG_P (operands[0])
9724 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9725 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9726 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9728 int regno = reg_or_subregno (operands[0]);
9729 enum reg_class cl;
9731 if (!HARD_REGISTER_NUM_P (regno))
9733 cl = reg_preferred_class (regno);
9734 gcc_assert (cl != NO_REGS);
9735 regno = reg_renumber[regno];
9736 if (regno < 0)
9737 regno = ira_class_hard_regs[cl][0];
9739 if (FP_REGNO_P (regno))
9741 if (GET_MODE (operands[1]) != DDmode)
9742 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9743 emit_insn (gen_movsd_load (operands[0], operands[1]));
9745 else if (INT_REGNO_P (regno))
9746 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9747 else
9748 gcc_unreachable();
9749 return;
9752 /* FIXME: In the long term, this switch statement should go away
9753 and be replaced by a sequence of tests based on things like
9754 mode == Pmode. */
9755 switch (mode)
9757 case E_HImode:
9758 case E_QImode:
9759 if (CONSTANT_P (operands[1])
9760 && !CONST_INT_P (operands[1]))
9761 operands[1] = force_const_mem (mode, operands[1]);
9762 break;
9764 case E_TFmode:
9765 case E_TDmode:
9766 case E_IFmode:
9767 case E_KFmode:
9768 if (FLOAT128_2REG_P (mode))
9769 rs6000_eliminate_indexed_memrefs (operands);
9770 /* fall through */
9772 case E_DFmode:
9773 case E_DDmode:
9774 case E_SFmode:
9775 case E_SDmode:
9776 if (CONSTANT_P (operands[1])
9777 && ! easy_fp_constant (operands[1], mode))
9778 operands[1] = force_const_mem (mode, operands[1]);
9779 break;
9781 case E_V16QImode:
9782 case E_V8HImode:
9783 case E_V4SFmode:
9784 case E_V4SImode:
9785 case E_V2DFmode:
9786 case E_V2DImode:
9787 case E_V1TImode:
9788 if (CONSTANT_P (operands[1])
9789 && !easy_vector_constant (operands[1], mode))
9790 operands[1] = force_const_mem (mode, operands[1]);
9791 break;
9793 case E_SImode:
9794 case E_DImode:
9795 /* Use default pattern for address of ELF small data */
9796 if (TARGET_ELF
9797 && mode == Pmode
9798 && DEFAULT_ABI == ABI_V4
9799 && (SYMBOL_REF_P (operands[1])
9800 || GET_CODE (operands[1]) == CONST)
9801 && small_data_operand (operands[1], mode))
9803 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9804 return;
9807 if (DEFAULT_ABI == ABI_V4
9808 && mode == Pmode && mode == SImode
9809 && flag_pic == 1 && got_operand (operands[1], mode))
9811 emit_insn (gen_movsi_got (operands[0], operands[1]));
9812 return;
9815 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9816 && TARGET_NO_TOC
9817 && ! flag_pic
9818 && mode == Pmode
9819 && CONSTANT_P (operands[1])
9820 && GET_CODE (operands[1]) != HIGH
9821 && !CONST_INT_P (operands[1]))
9823 rtx target = (!can_create_pseudo_p ()
9824 ? operands[0]
9825 : gen_reg_rtx (mode));
9827 /* If this is a function address on -mcall-aixdesc,
9828 convert it to the address of the descriptor. */
9829 if (DEFAULT_ABI == ABI_AIX
9830 && SYMBOL_REF_P (operands[1])
9831 && XSTR (operands[1], 0)[0] == '.')
9833 const char *name = XSTR (operands[1], 0);
9834 rtx new_ref;
9835 while (*name == '.')
9836 name++;
9837 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9838 CONSTANT_POOL_ADDRESS_P (new_ref)
9839 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9840 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9841 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9842 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9843 operands[1] = new_ref;
9846 if (DEFAULT_ABI == ABI_DARWIN)
9848 #if TARGET_MACHO
9849 if (MACHO_DYNAMIC_NO_PIC_P)
9851 /* Take care of any required data indirection. */
9852 operands[1] = rs6000_machopic_legitimize_pic_address (
9853 operands[1], mode, operands[0]);
9854 if (operands[0] != operands[1])
9855 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9856 return;
9858 #endif
9859 emit_insn (gen_macho_high (target, operands[1]));
9860 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9861 return;
9864 emit_insn (gen_elf_high (target, operands[1]));
9865 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9866 return;
9869 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9870 and we have put it in the TOC, we just need to make a TOC-relative
9871 reference to it. */
9872 if (TARGET_TOC
9873 && SYMBOL_REF_P (operands[1])
9874 && use_toc_relative_ref (operands[1], mode))
9875 operands[1] = create_TOC_reference (operands[1], operands[0]);
9876 else if (mode == Pmode
9877 && CONSTANT_P (operands[1])
9878 && GET_CODE (operands[1]) != HIGH
9879 && ((REG_P (operands[0])
9880 && FP_REGNO_P (REGNO (operands[0])))
9881 || !CONST_INT_P (operands[1])
9882 || (num_insns_constant (operands[1], mode)
9883 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9884 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9885 && (TARGET_CMODEL == CMODEL_SMALL
9886 || can_create_pseudo_p ()
9887 || (REG_P (operands[0])
9888 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9891 #if TARGET_MACHO
9892 /* Darwin uses a special PIC legitimizer. */
9893 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9895 operands[1] =
9896 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9897 operands[0]);
9898 if (operands[0] != operands[1])
9899 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9900 return;
9902 #endif
9904 /* If we are to limit the number of things we put in the TOC and
9905 this is a symbol plus a constant we can add in one insn,
9906 just put the symbol in the TOC and add the constant. */
9907 if (GET_CODE (operands[1]) == CONST
9908 && TARGET_NO_SUM_IN_TOC
9909 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9910 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9911 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9912 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
9913 && ! side_effects_p (operands[0]))
9915 rtx sym =
9916 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9917 rtx other = XEXP (XEXP (operands[1], 0), 1);
9919 sym = force_reg (mode, sym);
9920 emit_insn (gen_add3_insn (operands[0], sym, other));
9921 return;
9924 operands[1] = force_const_mem (mode, operands[1]);
9926 if (TARGET_TOC
9927 && SYMBOL_REF_P (XEXP (operands[1], 0))
9928 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
9930 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9931 operands[0]);
9932 operands[1] = gen_const_mem (mode, tocref);
9933 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9936 break;
9938 case E_TImode:
9939 if (!VECTOR_MEM_VSX_P (TImode))
9940 rs6000_eliminate_indexed_memrefs (operands);
9941 break;
9943 case E_PTImode:
9944 rs6000_eliminate_indexed_memrefs (operands);
9945 break;
9947 default:
9948 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9951 /* Above, we may have called force_const_mem which may have returned
9952 an invalid address. If we can, fix this up; otherwise, reload will
9953 have to deal with it. */
9954 if (MEM_P (operands[1]))
9955 operands[1] = validize_mem (operands[1]);
9957 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9960 /* Nonzero if we can use a floating-point register to pass this arg. */
9961 #define USE_FP_FOR_ARG_P(CUM,MODE) \
9962 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
9963 && (CUM)->fregno <= FP_ARG_MAX_REG \
9964 && TARGET_HARD_FLOAT)
9966 /* Nonzero if we can use an AltiVec register to pass this arg. */
9967 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
9968 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
9969 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
9970 && TARGET_ALTIVEC_ABI \
9971 && (NAMED))
9973 /* Walk down the type tree of TYPE counting consecutive base elements.
9974 If *MODEP is VOIDmode, then set it to the first valid floating point
9975 or vector type. If a non-floating point or vector type is found, or
9976 if a floating point or vector type that doesn't match a non-VOIDmode
9977 *MODEP is found, then return -1, otherwise return the count in the
9978 sub-tree. */
9980 static int
9981 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
9983 machine_mode mode;
9984 HOST_WIDE_INT size;
9986 switch (TREE_CODE (type))
9988 case REAL_TYPE:
9989 mode = TYPE_MODE (type);
9990 if (!SCALAR_FLOAT_MODE_P (mode))
9991 return -1;
9993 if (*modep == VOIDmode)
9994 *modep = mode;
9996 if (*modep == mode)
9997 return 1;
9999 break;
10001 case COMPLEX_TYPE:
10002 mode = TYPE_MODE (TREE_TYPE (type));
10003 if (!SCALAR_FLOAT_MODE_P (mode))
10004 return -1;
10006 if (*modep == VOIDmode)
10007 *modep = mode;
10009 if (*modep == mode)
10010 return 2;
10012 break;
10014 case VECTOR_TYPE:
10015 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10016 return -1;
10018 /* Use V4SImode as representative of all 128-bit vector types. */
10019 size = int_size_in_bytes (type);
10020 switch (size)
10022 case 16:
10023 mode = V4SImode;
10024 break;
10025 default:
10026 return -1;
10029 if (*modep == VOIDmode)
10030 *modep = mode;
10032 /* Vector modes are considered to be opaque: two vectors are
10033 equivalent for the purposes of being homogeneous aggregates
10034 if they are the same size. */
10035 if (*modep == mode)
10036 return 1;
10038 break;
10040 case ARRAY_TYPE:
10042 int count;
10043 tree index = TYPE_DOMAIN (type);
10045 /* Can't handle incomplete types or sizes that are not
10046 fixed. */
10047 if (!COMPLETE_TYPE_P (type)
10048 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10049 return -1;
10051 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10052 if (count == -1
10053 || !index
10054 || !TYPE_MAX_VALUE (index)
10055 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10056 || !TYPE_MIN_VALUE (index)
10057 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10058 || count < 0)
10059 return -1;
10061 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10062 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10064 /* There must be no padding. */
10065 if (wi::to_wide (TYPE_SIZE (type))
10066 != count * GET_MODE_BITSIZE (*modep))
10067 return -1;
10069 return count;
10072 case RECORD_TYPE:
10074 int count = 0;
10075 int sub_count;
10076 tree field;
10078 /* Can't handle incomplete types or sizes that are not
10079 fixed. */
10080 if (!COMPLETE_TYPE_P (type)
10081 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10082 return -1;
10084 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10086 if (TREE_CODE (field) != FIELD_DECL)
10087 continue;
10089 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10090 if (sub_count < 0)
10091 return -1;
10092 count += sub_count;
10095 /* There must be no padding. */
10096 if (wi::to_wide (TYPE_SIZE (type))
10097 != count * GET_MODE_BITSIZE (*modep))
10098 return -1;
10100 return count;
10103 case UNION_TYPE:
10104 case QUAL_UNION_TYPE:
10106 /* These aren't very interesting except in a degenerate case. */
10107 int count = 0;
10108 int sub_count;
10109 tree field;
10111 /* Can't handle incomplete types or sizes that are not
10112 fixed. */
10113 if (!COMPLETE_TYPE_P (type)
10114 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10115 return -1;
10117 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10119 if (TREE_CODE (field) != FIELD_DECL)
10120 continue;
10122 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10123 if (sub_count < 0)
10124 return -1;
10125 count = count > sub_count ? count : sub_count;
10128 /* There must be no padding. */
10129 if (wi::to_wide (TYPE_SIZE (type))
10130 != count * GET_MODE_BITSIZE (*modep))
10131 return -1;
10133 return count;
10136 default:
10137 break;
10140 return -1;
10143 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10144 float or vector aggregate that shall be passed in FP/vector registers
10145 according to the ELFv2 ABI, return the homogeneous element mode in
10146 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10148 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10150 static bool
10151 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10152 machine_mode *elt_mode,
10153 int *n_elts)
10155 /* Note that we do not accept complex types at the top level as
10156 homogeneous aggregates; these types are handled via the
10157 targetm.calls.split_complex_arg mechanism. Complex types
10158 can be elements of homogeneous aggregates, however. */
10159 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10160 && AGGREGATE_TYPE_P (type))
10162 machine_mode field_mode = VOIDmode;
10163 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10165 if (field_count > 0)
10167 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10168 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10170 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10171 up to AGGR_ARG_NUM_REG registers. */
10172 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10174 if (elt_mode)
10175 *elt_mode = field_mode;
10176 if (n_elts)
10177 *n_elts = field_count;
10178 return true;
10183 if (elt_mode)
10184 *elt_mode = mode;
10185 if (n_elts)
10186 *n_elts = 1;
10187 return false;
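/* A standalone sketch (not part of GCC) of the size test above.  The
   value of AGGR_ARG_NUM_REG is assumed here to be 8; the real limit
   comes from the ELFv2 ABI and the GCC headers.  Float elements use
   8-byte register slots and vector elements 16-byte slots.  */

#include <stdio.h>
#include <stdbool.h>

static bool
fits_in_regs (int field_count, int field_size_bytes, bool vector_elt)
{
  const int aggr_arg_num_reg = 8;      /* assumption, see lead comment */
  int reg_size = vector_elt ? 16 : 8;
  int field_size = (field_size_bytes + reg_size - 1) / reg_size * reg_size;
  return field_count * field_size <= aggr_arg_num_reg * reg_size;
}

int
main (void)
{
  /* struct { double a, b, c, d; }: four DFmode elements fit.  */
  printf ("%d\n", fits_in_regs (4, 8, false));   /* prints 1 */
  /* Nine doubles exceed the limit and go to memory.  */
  printf ("%d\n", fits_in_regs (9, 8, false));   /* prints 0 */
  return 0;
}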
10190 /* Return a nonzero value if the function value should be returned in
10191 memory, just as large structures always are. TYPE will be
10192 the data type of the value, and FNTYPE will be the type of the
10193 function doing the returning, or @code{NULL} for libcalls.
10195 The AIX ABI for the RS/6000 specifies that all structures are
10196 returned in memory. The Darwin ABI does the same.
10198 For the Darwin 64 Bit ABI, a function result can be returned in
10199 registers or in memory, depending on the size of the return data
10200 type. If it is returned in registers, the value occupies the same
10201 registers as it would if it were the first and only function
10202 argument. Otherwise, the function places its result in memory at
10203 the location pointed to by GPR3.
10205 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10206 but a draft put them in memory, and GCC used to implement the draft
10207 instead of the final standard. Therefore, aix_struct_return
10208 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10209 compatibility can change DRAFT_V4_STRUCT_RET to override the
10210 default, and -m switches get the final word. See
10211 rs6000_option_override_internal for more details.
10213 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10214 long double support is enabled. These values are returned in memory.
10216 int_size_in_bytes returns -1 for variable size objects, which go in
10217 memory always. The cast to unsigned makes -1 > 8. */
10219 static bool
10220 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10222 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10223 if (TARGET_MACHO
10224 && rs6000_darwin64_abi
10225 && TREE_CODE (type) == RECORD_TYPE
10226 && int_size_in_bytes (type) > 0)
10228 CUMULATIVE_ARGS valcum;
10229 rtx valret;
10231 valcum.words = 0;
10232 valcum.fregno = FP_ARG_MIN_REG;
10233 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10234 /* Do a trial code generation as if this were going to be passed
10235 as an argument; if any part goes in memory, we return NULL. */
10236 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10237 if (valret)
10238 return false;
10239 /* Otherwise fall through to more conventional ABI rules. */
10242 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10243 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10244 NULL, NULL))
10245 return false;
10247 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10248 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10249 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10250 return false;
10252 if (AGGREGATE_TYPE_P (type)
10253 && (aix_struct_return
10254 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10255 return true;
10257 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10258 modes only exist for GCC vector types if -maltivec. */
10259 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10260 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10261 return false;
10263 /* Return synthetic vectors in memory. */
10264 if (TREE_CODE (type) == VECTOR_TYPE
10265 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10267 static bool warned_for_return_big_vectors = false;
10268 if (!warned_for_return_big_vectors)
10270 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10271 "non-standard ABI extension with no compatibility "
10272 "guarantee");
10273 warned_for_return_big_vectors = true;
10275 return true;
10278 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10279 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10280 return true;
10282 return false;
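/* A standalone sketch (not part of GCC) of the "cast to unsigned makes
   -1 > 8" idiom mentioned above: int_size_in_bytes returns -1 for
   variable-sized types, and the unsigned conversion makes that compare
   greater than any small limit, so one test also sends such types to
   memory.  */

#include <stdio.h>

int
main (void)
{
  long long size = -1;         /* a variable-sized type */
  printf ("%d\n", (unsigned long long) size > 8);   /* prints 1 */
  printf ("%d\n", (unsigned long long) 8 > 8);      /* prints 0 */
  return 0;
}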
10285 /* Specify whether values returned in registers should be at the most
10286 significant end of a register. We want aggregates returned by
10287 value to match the way aggregates are passed to functions. */
10289 static bool
10290 rs6000_return_in_msb (const_tree valtype)
10292 return (DEFAULT_ABI == ABI_ELFv2
10293 && BYTES_BIG_ENDIAN
10294 && AGGREGATE_TYPE_P (valtype)
10295 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10296 == PAD_UPWARD));
10299 #ifdef HAVE_AS_GNU_ATTRIBUTE
10300 /* Return TRUE if a call to function FNDECL may be one that
10301 potentially affects the function calling ABI of the object file. */
10303 static bool
10304 call_ABI_of_interest (tree fndecl)
10306 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10308 struct cgraph_node *c_node;
10310 /* Libcalls are always interesting. */
10311 if (fndecl == NULL_TREE)
10312 return true;
10314 /* Any call to an external function is interesting. */
10315 if (DECL_EXTERNAL (fndecl))
10316 return true;
10318 /* Interesting functions that we are emitting in this object file. */
10319 c_node = cgraph_node::get (fndecl);
10320 c_node = c_node->ultimate_alias_target ();
10321 return !c_node->only_called_directly_p ();
10323 return false;
10325 #endif
10327 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10328 for a call to a function whose data type is FNTYPE.
10329 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10331 For incoming args we set the number of arguments in the prototype large
10332 so we never return a PARALLEL. */
10334 void
10335 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10336 rtx libname ATTRIBUTE_UNUSED, int incoming,
10337 int libcall, int n_named_args,
10338 tree fndecl,
10339 machine_mode return_mode ATTRIBUTE_UNUSED)
10341 static CUMULATIVE_ARGS zero_cumulative;
10343 *cum = zero_cumulative;
10344 cum->words = 0;
10345 cum->fregno = FP_ARG_MIN_REG;
10346 cum->vregno = ALTIVEC_ARG_MIN_REG;
10347 cum->prototype = (fntype && prototype_p (fntype));
10348 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10349 ? CALL_LIBCALL : CALL_NORMAL);
10350 cum->sysv_gregno = GP_ARG_MIN_REG;
10351 cum->stdarg = stdarg_p (fntype);
10352 cum->libcall = libcall;
10354 cum->nargs_prototype = 0;
10355 if (incoming || cum->prototype)
10356 cum->nargs_prototype = n_named_args;
10358 /* Check for a longcall attribute. */
10359 if ((!fntype && rs6000_default_long_calls)
10360 || (fntype
10361 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10362 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10363 cum->call_cookie |= CALL_LONG;
10364 else if (DEFAULT_ABI != ABI_DARWIN)
10366 bool is_local = (fndecl
10367 && !DECL_EXTERNAL (fndecl)
10368 && !DECL_WEAK (fndecl)
10369 && (*targetm.binds_local_p) (fndecl));
10370 if (is_local)
10372 else if (flag_plt)
10374 if (fntype
10375 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10376 cum->call_cookie |= CALL_LONG;
10378 else
10380 if (!(fntype
10381 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10382 cum->call_cookie |= CALL_LONG;
10386 if (TARGET_DEBUG_ARG)
10388 fprintf (stderr, "\ninit_cumulative_args:");
10389 if (fntype)
10391 tree ret_type = TREE_TYPE (fntype);
10392 fprintf (stderr, " ret code = %s,",
10393 get_tree_code_name (TREE_CODE (ret_type)));
10396 if (cum->call_cookie & CALL_LONG)
10397 fprintf (stderr, " longcall,");
10399 fprintf (stderr, " proto = %d, nargs = %d\n",
10400 cum->prototype, cum->nargs_prototype);
10403 #ifdef HAVE_AS_GNU_ATTRIBUTE
10404 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10406 cum->escapes = call_ABI_of_interest (fndecl);
10407 if (cum->escapes)
10409 tree return_type;
10411 if (fntype)
10413 return_type = TREE_TYPE (fntype);
10414 return_mode = TYPE_MODE (return_type);
10416 else
10417 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10419 if (return_type != NULL)
10421 if (TREE_CODE (return_type) == RECORD_TYPE
10422 && TYPE_TRANSPARENT_AGGR (return_type))
10424 return_type = TREE_TYPE (first_field (return_type));
10425 return_mode = TYPE_MODE (return_type);
10427 if (AGGREGATE_TYPE_P (return_type)
10428 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10429 <= 8))
10430 rs6000_returns_struct = true;
10432 if (SCALAR_FLOAT_MODE_P (return_mode))
10434 rs6000_passes_float = true;
10435 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10436 && (FLOAT128_IBM_P (return_mode)
10437 || FLOAT128_IEEE_P (return_mode)
10438 || (return_type != NULL
10439 && (TYPE_MAIN_VARIANT (return_type)
10440 == long_double_type_node))))
10441 rs6000_passes_long_double = true;
10443 /* Note if we pass or return an IEEE 128-bit type. We changed
10444 the mangling for these types, and we may need to make an alias
10445 with the old mangling. */
10446 if (FLOAT128_IEEE_P (return_mode))
10447 rs6000_passes_ieee128 = true;
10449 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10450 rs6000_passes_vector = true;
10453 #endif
10455 if (fntype
10456 && !TARGET_ALTIVEC
10457 && TARGET_ALTIVEC_ABI
10458 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10460 error ("cannot return value in vector register because"
10461 " altivec instructions are disabled, use %qs"
10462 " to enable them", "-maltivec");
10466 /* The mode the ABI uses for a word. This is not the same as word_mode
10467 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10469 static scalar_int_mode
10470 rs6000_abi_word_mode (void)
10472 return TARGET_32BIT ? SImode : DImode;
10475 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10476 static char *
10477 rs6000_offload_options (void)
10479 if (TARGET_64BIT)
10480 return xstrdup ("-foffload-abi=lp64");
10481 else
10482 return xstrdup ("-foffload-abi=ilp32");
10485 /* On rs6000, function arguments are promoted, as are function return
10486 values. */
10488 static machine_mode
10489 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10490 machine_mode mode,
10491 int *punsignedp ATTRIBUTE_UNUSED,
10492 const_tree, int)
10494 PROMOTE_MODE (mode, *punsignedp, type);
10496 return mode;
10499 /* Return true if TYPE must be passed on the stack and not in registers. */
10501 static bool
10502 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10504 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10505 return must_pass_in_stack_var_size (mode, type);
10506 else
10507 return must_pass_in_stack_var_size_or_pad (mode, type);
10510 static inline bool
10511 is_complex_IBM_long_double (machine_mode mode)
10513 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10516 /* Whether ABI_V4 passes MODE args to a function in floating point
10517 registers. */
10519 static bool
10520 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10522 if (!TARGET_HARD_FLOAT)
10523 return false;
10524 if (mode == DFmode)
10525 return true;
10526 if (mode == SFmode && named)
10527 return true;
10528 /* ABI_V4 passes complex IBM long double in 8 gprs.
10529 Stupid, but we can't change the ABI now. */
10530 if (is_complex_IBM_long_double (mode))
10531 return false;
10532 if (FLOAT128_2REG_P (mode))
10533 return true;
10534 if (DECIMAL_FLOAT_MODE_P (mode))
10535 return true;
10536 return false;
10539 /* Implement TARGET_FUNCTION_ARG_PADDING.
10541 For the AIX ABI structs are always stored left shifted in their
10542 argument slot. */
10544 static pad_direction
10545 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10547 #ifndef AGGREGATE_PADDING_FIXED
10548 #define AGGREGATE_PADDING_FIXED 0
10549 #endif
10550 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10551 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10552 #endif
10554 if (!AGGREGATE_PADDING_FIXED)
10556 /* GCC used to pass structures of the same size as integer types as
10557 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10558 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10559 passed padded downward, except that -mstrict-align further
10560 muddied the water in that multi-component structures of 2 and 4
10561 bytes in size were passed padded upward.
10563 The following arranges for best compatibility with previous
10564 versions of gcc, but removes the -mstrict-align dependency. */
10565 if (BYTES_BIG_ENDIAN)
10567 HOST_WIDE_INT size = 0;
10569 if (mode == BLKmode)
10571 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10572 size = int_size_in_bytes (type);
10574 else
10575 size = GET_MODE_SIZE (mode);
10577 if (size == 1 || size == 2 || size == 4)
10578 return PAD_DOWNWARD;
10580 return PAD_UPWARD;
10583 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10585 if (type != 0 && AGGREGATE_TYPE_P (type))
10586 return PAD_UPWARD;
10589 /* Fall back to the default. */
10590 return default_function_arg_padding (mode, type);
10593 /* If defined, a C expression that gives the alignment boundary, in bits,
10594 of an argument with the specified mode and type. If it is not defined,
10595 PARM_BOUNDARY is used for all arguments.
10597 V.4 wants long longs and doubles to be double word aligned. Just
10598 testing the mode size is a boneheaded way to do this as it means
10599 that other types such as complex int are also double word aligned.
10600 However, we're stuck with this because changing the ABI might break
10601 existing library interfaces.
10603 Quadword align Altivec/VSX vectors.
10604 Quadword align large synthetic vector types. */
10606 static unsigned int
10607 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10609 machine_mode elt_mode;
10610 int n_elts;
10612 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10614 if (DEFAULT_ABI == ABI_V4
10615 && (GET_MODE_SIZE (mode) == 8
10616 || (TARGET_HARD_FLOAT
10617 && !is_complex_IBM_long_double (mode)
10618 && FLOAT128_2REG_P (mode))))
10619 return 64;
10620 else if (FLOAT128_VECTOR_P (mode))
10621 return 128;
10622 else if (type && TREE_CODE (type) == VECTOR_TYPE
10623 && int_size_in_bytes (type) >= 8
10624 && int_size_in_bytes (type) < 16)
10625 return 64;
10626 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10627 || (type && TREE_CODE (type) == VECTOR_TYPE
10628 && int_size_in_bytes (type) >= 16))
10629 return 128;
10631 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10632 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10633 -mcompat-align-parm is used. */
10634 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10635 || DEFAULT_ABI == ABI_ELFv2)
10636 && type && TYPE_ALIGN (type) > 64)
10638 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10639 or homogeneous float/vector aggregates here. We already handled
10640 vector aggregates above, but still need to check for float here. */
10641 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10642 && !SCALAR_FLOAT_MODE_P (elt_mode));
10644 /* We used to check for BLKmode instead of the above aggregate type
10645 check. Warn when this results in any difference to the ABI. */
10646 if (aggregate_p != (mode == BLKmode))
10648 static bool warned;
10649 if (!warned && warn_psabi)
10651 warned = true;
10652 inform (input_location,
10653 "the ABI of passing aggregates with %d-byte alignment"
10654 " has changed in GCC 5",
10655 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10659 if (aggregate_p)
10660 return 128;
10663 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10664 implement the "aggregate type" check as a BLKmode check here; this
10665 means certain aggregate types are in fact not aligned. */
10666 if (TARGET_MACHO && rs6000_darwin64_abi
10667 && mode == BLKmode
10668 && type && TYPE_ALIGN (type) > 64)
10669 return 128;
10671 return PARM_BOUNDARY;
10674 /* The offset in words to the start of the parameter save area. */
10676 static unsigned int
10677 rs6000_parm_offset (void)
10679 return (DEFAULT_ABI == ABI_V4 ? 2
10680 : DEFAULT_ABI == ABI_ELFv2 ? 4
10681 : 6);
10684 /* For a function parm of MODE and TYPE, return the starting word in
10685 the parameter area. NWORDS of the parameter area are already used. */
10687 static unsigned int
10688 rs6000_parm_start (machine_mode mode, const_tree type,
10689 unsigned int nwords)
10691 unsigned int align;
10693 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10694 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10697 /* Compute the size (in words) of a function argument. */
10699 static unsigned long
10700 rs6000_arg_size (machine_mode mode, const_tree type)
10702 unsigned long size;
10704 if (mode != BLKmode)
10705 size = GET_MODE_SIZE (mode);
10706 else
10707 size = int_size_in_bytes (type);
10709 if (TARGET_32BIT)
10710 return (size + 3) >> 2;
10711 else
10712 return (size + 7) >> 3;
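/* A standalone sketch (not part of GCC) of the word arithmetic in
   rs6000_parm_start and rs6000_arg_size above.  It assumes 64-bit words
   (PARM_BOUNDARY == 64) and the ELFv2 parameter-area offset of 4 words;
   a 128-bit boundary then gives an alignment mask of 1.  */

#include <stdio.h>

int
main (void)
{
  unsigned int parm_offset = 4;        /* ELFv2, see rs6000_parm_offset */
  unsigned int nwords = 3;             /* words already used */
  unsigned int align = 128 / 64 - 1;   /* boundary / PARM_BOUNDARY - 1 */

  /* 3 + (-(4 + 3) & 1) == 4, so parm_offset + start is even, i.e. the
     argument starts on a 16-byte boundary.  */
  unsigned int start = nwords + (-(parm_offset + nwords) & align);
  printf ("start word = %u\n", start);                /* prints 4 */

  /* Argument size in 64-bit words, e.g. a 20-byte struct.  */
  unsigned long size = 20;
  printf ("size in words = %lu\n", (size + 7) >> 3);  /* prints 3 */
  return 0;
}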
10715 /* Use this to flush pending int fields. */
10717 static void
10718 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10719 HOST_WIDE_INT bitpos, int final)
10721 unsigned int startbit, endbit;
10722 int intregs, intoffset;
10724 /* Handle the situation where a float is taking up the first half
10725 of the GPR, and the other half is empty (typically due to
10726 alignment restrictions). We can detect this by an 8-byte-aligned
10727 int field, or by seeing that this is the final flush for this
10728 argument. Count the word and continue on. */
10729 if (cum->floats_in_gpr == 1
10730 && (cum->intoffset % 64 == 0
10731 || (cum->intoffset == -1 && final)))
10733 cum->words++;
10734 cum->floats_in_gpr = 0;
10737 if (cum->intoffset == -1)
10738 return;
10740 intoffset = cum->intoffset;
10741 cum->intoffset = -1;
10742 cum->floats_in_gpr = 0;
10744 if (intoffset % BITS_PER_WORD != 0)
10746 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10747 if (!int_mode_for_size (bits, 0).exists ())
10749 /* We couldn't find an appropriate mode, which happens,
10750 e.g., in packed structs when there are 3 bytes to load.
10751 Back intoffset back to the beginning of the word in this
10752 case. */
10753 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10757 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10758 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10759 intregs = (endbit - startbit) / BITS_PER_WORD;
10760 cum->words += intregs;
10761 /* words should be unsigned. */
10762 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10764 int pad = (endbit/BITS_PER_WORD) - cum->words;
10765 cum->words += pad;
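/* A standalone sketch (not part of GCC) of the flush arithmetic above:
   the bit range covered by pending integer fields is widened to whole
   64-bit words and converted into a GPR count.  ROUND_DOWN/ROUND_UP are
   restated here to keep the example self-contained.  */

#include <stdio.h>

#define BITS_PER_WORD 64
#define ROUND_DOWN(n, d) ((n) / (d) * (d))
#define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))

int
main (void)
{
  int intoffset = 40;   /* first pending integer bit */
  int bitpos = 160;     /* one past the last pending bit */

  int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);   /* 0 */
  int endbit = ROUND_UP (bitpos, BITS_PER_WORD);          /* 192 */
  printf ("GPRs needed = %d\n",
          (endbit - startbit) / BITS_PER_WORD);           /* prints 3 */
  return 0;
}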
10769 /* The darwin64 ABI calls for us to recurse down through structs,
10770 looking for elements passed in registers. Unfortunately, we have
10771 to track int register count here also because of misalignments
10772 in powerpc alignment mode. */
10774 static void
10775 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10776 const_tree type,
10777 HOST_WIDE_INT startbitpos)
10779 tree f;
10781 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10782 if (TREE_CODE (f) == FIELD_DECL)
10784 HOST_WIDE_INT bitpos = startbitpos;
10785 tree ftype = TREE_TYPE (f);
10786 machine_mode mode;
10787 if (ftype == error_mark_node)
10788 continue;
10789 mode = TYPE_MODE (ftype);
10791 if (DECL_SIZE (f) != 0
10792 && tree_fits_uhwi_p (bit_position (f)))
10793 bitpos += int_bit_position (f);
10795 /* ??? FIXME: else assume zero offset. */
10797 if (TREE_CODE (ftype) == RECORD_TYPE)
10798 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10799 else if (USE_FP_FOR_ARG_P (cum, mode))
10801 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10802 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10803 cum->fregno += n_fpregs;
10804 /* Single-precision floats present a special problem for
10805 us, because they are smaller than an 8-byte GPR, and so
10806 the structure-packing rules combined with the standard
10807 varargs behavior mean that we want to pack float/float
10808 and float/int combinations into a single register's
10809 space. This is complicated by the arg advance flushing,
10810 which works on arbitrarily large groups of int-type
10811 fields. */
10812 if (mode == SFmode)
10814 if (cum->floats_in_gpr == 1)
10816 /* Two floats in a word; count the word and reset
10817 the float count. */
10818 cum->words++;
10819 cum->floats_in_gpr = 0;
10821 else if (bitpos % 64 == 0)
10823 /* A float at the beginning of an 8-byte word;
10824 count it and put off adjusting cum->words until
10825 we see if an arg advance flush is going to do it
10826 for us. */
10827 cum->floats_in_gpr++;
10829 else
10831 /* The float is at the end of a word, preceded
10832 by integer fields, so the arg advance flush
10833 just above has already set cum->words and
10834 everything is taken care of. */
10837 else
10838 cum->words += n_fpregs;
10840 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10842 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10843 cum->vregno++;
10844 cum->words += 2;
10846 else if (cum->intoffset == -1)
10847 cum->intoffset = bitpos;
10851 /* Check for an item that needs to be considered specially under the darwin 64
10852 bit ABI. These are record types where the mode is BLK or the structure is
10853 8 bytes in size. */
10854 static int
10855 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10857 return rs6000_darwin64_abi
10858 && ((mode == BLKmode
10859 && TREE_CODE (type) == RECORD_TYPE
10860 && int_size_in_bytes (type) > 0)
10861 || (type && TREE_CODE (type) == RECORD_TYPE
10862 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10865 /* Update the data in CUM to advance over an argument
10866 of mode MODE and data type TYPE.
10867 (TYPE is null for libcalls where that information may not be available.)
10869 Note that for args passed by reference, function_arg will be called
10870 with MODE and TYPE set to that of the pointer to the arg, not the arg
10871 itself. */
10873 static void
10874 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10875 const_tree type, bool named, int depth)
10877 machine_mode elt_mode;
10878 int n_elts;
10880 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10882 /* Only tick off an argument if we're not recursing. */
10883 if (depth == 0)
10884 cum->nargs_prototype--;
10886 #ifdef HAVE_AS_GNU_ATTRIBUTE
10887 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10888 && cum->escapes)
10890 if (SCALAR_FLOAT_MODE_P (mode))
10892 rs6000_passes_float = true;
10893 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10894 && (FLOAT128_IBM_P (mode)
10895 || FLOAT128_IEEE_P (mode)
10896 || (type != NULL
10897 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10898 rs6000_passes_long_double = true;
10900 /* Note if we pass or return an IEEE 128-bit type. We changed the
10901 mangling for these types, and we may need to make an alias with
10902 the old mangling. */
10903 if (FLOAT128_IEEE_P (mode))
10904 rs6000_passes_ieee128 = true;
10906 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10907 rs6000_passes_vector = true;
10909 #endif
10911 if (TARGET_ALTIVEC_ABI
10912 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10913 || (type && TREE_CODE (type) == VECTOR_TYPE
10914 && int_size_in_bytes (type) == 16)))
10916 bool stack = false;
10918 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10920 cum->vregno += n_elts;
10922 if (!TARGET_ALTIVEC)
10923 error ("cannot pass argument in vector register because"
10924 " altivec instructions are disabled, use %qs"
10925 " to enable them", "-maltivec");
10927 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10928 even if it is going to be passed in a vector register.
10929 Darwin does the same for variable-argument functions. */
10930 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10931 && TARGET_64BIT)
10932 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10933 stack = true;
10935 else
10936 stack = true;
10938 if (stack)
10940 int align;
10942 /* Vector parameters must be 16-byte aligned. In 32-bit
10943 mode this means we need to take into account the offset
10944 to the parameter save area. In 64-bit mode, they just
10945 have to start on an even word, since the parameter save
10946 area is 16-byte aligned. */
10947 if (TARGET_32BIT)
10948 align = -(rs6000_parm_offset () + cum->words) & 3;
10949 else
10950 align = cum->words & 1;
10951 cum->words += align + rs6000_arg_size (mode, type);
10953 if (TARGET_DEBUG_ARG)
10955 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10956 cum->words, align);
10957 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10958 cum->nargs_prototype, cum->prototype,
10959 GET_MODE_NAME (mode));
10963 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10965 int size = int_size_in_bytes (type);
10966 /* Variable sized types have size == -1 and are
10967 treated as if consisting entirely of ints.
10968 Pad to 16 byte boundary if needed. */
10969 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10970 && (cum->words % 2) != 0)
10971 cum->words++;
10972 /* For varargs, we can just go up by the size of the struct. */
10973 if (!named)
10974 cum->words += (size + 7) / 8;
10975 else
10977 /* It is tempting to say int register count just goes up by
10978 sizeof(type)/8, but this is wrong in a case such as
10979 { int; double; int; } [powerpc alignment]. We have to
10980 grovel through the fields for these too. */
10981 cum->intoffset = 0;
10982 cum->floats_in_gpr = 0;
10983 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
10984 rs6000_darwin64_record_arg_advance_flush (cum,
10985 size * BITS_PER_UNIT, 1);
10987 if (TARGET_DEBUG_ARG)
10989 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
10990 cum->words, TYPE_ALIGN (type), size);
10991 fprintf (stderr,
10992 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
10993 cum->nargs_prototype, cum->prototype,
10994 GET_MODE_NAME (mode));
10997 else if (DEFAULT_ABI == ABI_V4)
10999 if (abi_v4_pass_in_fpr (mode, named))
11001 /* _Decimal128 must use an even/odd register pair. This assumes
11002 that the register number is odd when fregno is odd. */
11003 if (mode == TDmode && (cum->fregno % 2) == 1)
11004 cum->fregno++;
11006 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11007 <= FP_ARG_V4_MAX_REG)
11008 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11009 else
11011 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11012 if (mode == DFmode || FLOAT128_IBM_P (mode)
11013 || mode == DDmode || mode == TDmode)
11014 cum->words += cum->words & 1;
11015 cum->words += rs6000_arg_size (mode, type);
11018 else
11020 int n_words = rs6000_arg_size (mode, type);
11021 int gregno = cum->sysv_gregno;
11023 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11024 As is any other 2-word item, such as complex int, due to a
11025 historical mistake. */
11026 if (n_words == 2)
11027 gregno += (1 - gregno) & 1;
11029 /* Multi-reg args are not split between registers and stack. */
11030 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11032 /* Long long is aligned on the stack. So are other 2 word
11033 items such as complex int due to a historical mistake. */
11034 if (n_words == 2)
11035 cum->words += cum->words & 1;
11036 cum->words += n_words;
11039 /* Note: continuing to accumulate gregno past the point where we've
11040 started spilling to the stack tells expand_builtin_saveregs that
11041 we have started spilling to the stack. */
11042 cum->sysv_gregno = gregno + n_words;
11045 if (TARGET_DEBUG_ARG)
11047 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11048 cum->words, cum->fregno);
11049 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11050 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11051 fprintf (stderr, "mode = %4s, named = %d\n",
11052 GET_MODE_NAME (mode), named);
11055 else
11057 int n_words = rs6000_arg_size (mode, type);
11058 int start_words = cum->words;
11059 int align_words = rs6000_parm_start (mode, type, start_words);
11061 cum->words = align_words + n_words;
11063 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11065 /* _Decimal128 must be passed in an even/odd float register pair.
11066 This assumes that the register number is odd when fregno is
11067 odd. */
11068 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11069 cum->fregno++;
11070 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11073 if (TARGET_DEBUG_ARG)
11075 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11076 cum->words, cum->fregno);
11077 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11078 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11079 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11080 named, align_words - start_words, depth);
11085 static void
11086 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11087 const_tree type, bool named)
11089 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11090 0);
11093 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11094 structure between cum->intoffset and bitpos to integer registers. */
11096 static void
11097 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11098 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11100 machine_mode mode;
11101 unsigned int regno;
11102 unsigned int startbit, endbit;
11103 int this_regno, intregs, intoffset;
11104 rtx reg;
11106 if (cum->intoffset == -1)
11107 return;
11109 intoffset = cum->intoffset;
11110 cum->intoffset = -1;
11112 /* If this is the trailing part of a word, try to load only that
11113 much into the register. Otherwise load the whole register. Note
11114 that in the latter case we may pick up unwanted bits. It's not a
11115 problem at the moment, but we may wish to revisit this. */
11117 if (intoffset % BITS_PER_WORD != 0)
11119 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11120 if (!int_mode_for_size (bits, 0).exists (&mode))
11122 /* We couldn't find an appropriate mode, which happens,
11123 e.g., in packed structs when there are 3 bytes to load.
11124 Move intoffset back to the beginning of the word in this
11125 case. */
11126 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11127 mode = word_mode;
11130 else
11131 mode = word_mode;
11133 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11134 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11135 intregs = (endbit - startbit) / BITS_PER_WORD;
11136 this_regno = cum->words + intoffset / BITS_PER_WORD;
11138 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11139 cum->use_stack = 1;
11141 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11142 if (intregs <= 0)
11143 return;
11145 intoffset /= BITS_PER_UNIT;
11148 regno = GP_ARG_MIN_REG + this_regno;
11149 reg = gen_rtx_REG (mode, regno);
11150 rvec[(*k)++] =
11151 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11153 this_regno += 1;
11154 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11155 mode = word_mode;
11156 intregs -= 1;
11158 while (intregs > 0);
11161 /* Recursive workhorse for the following. */
11163 static void
11164 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11165 HOST_WIDE_INT startbitpos, rtx rvec[],
11166 int *k)
11168 tree f;
11170 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11171 if (TREE_CODE (f) == FIELD_DECL)
11173 HOST_WIDE_INT bitpos = startbitpos;
11174 tree ftype = TREE_TYPE (f);
11175 machine_mode mode;
11176 if (ftype == error_mark_node)
11177 continue;
11178 mode = TYPE_MODE (ftype);
11180 if (DECL_SIZE (f) != 0
11181 && tree_fits_uhwi_p (bit_position (f)))
11182 bitpos += int_bit_position (f);
11184 /* ??? FIXME: else assume zero offset. */
11186 if (TREE_CODE (ftype) == RECORD_TYPE)
11187 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11188 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11190 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11191 #if 0
11192 switch (mode)
11194 case E_SCmode: mode = SFmode; break;
11195 case E_DCmode: mode = DFmode; break;
11196 case E_TCmode: mode = TFmode; break;
11197 default: break;
11199 #endif
11200 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11201 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11203 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11204 && (mode == TFmode || mode == TDmode));
11205 /* Long double or _Decimal128 split over regs and memory. */
11206 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11207 cum->use_stack = 1;
11209 rvec[(*k)++]
11210 = gen_rtx_EXPR_LIST (VOIDmode,
11211 gen_rtx_REG (mode, cum->fregno++),
11212 GEN_INT (bitpos / BITS_PER_UNIT));
11213 if (FLOAT128_2REG_P (mode))
11214 cum->fregno++;
11216 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11218 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11219 rvec[(*k)++]
11220 = gen_rtx_EXPR_LIST (VOIDmode,
11221 gen_rtx_REG (mode, cum->vregno++),
11222 GEN_INT (bitpos / BITS_PER_UNIT));
11224 else if (cum->intoffset == -1)
11225 cum->intoffset = bitpos;
11229 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11230 the register(s) to be used for each field and subfield of a struct
11231 being passed by value, along with the offset of where the
11232 register's value may be found in the block. FP fields go in FP
11233 register, vector fields go in vector registers, and everything
11234 else goes in int registers, packed as in memory.
11236 This code is also used for function return values. RETVAL indicates
11237 whether this is the case.
11239 Much of this is taken from the SPARC V9 port, which has a similar
11240 calling convention. */
11242 static rtx
11243 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11244 bool named, bool retval)
11246 rtx rvec[FIRST_PSEUDO_REGISTER];
11247 int k = 1, kbase = 1;
11248 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11249 /* This is a copy; modifications are not visible to our caller. */
11250 CUMULATIVE_ARGS copy_cum = *orig_cum;
11251 CUMULATIVE_ARGS *cum = &copy_cum;
11253 /* Pad to 16 byte boundary if needed. */
11254 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11255 && (cum->words % 2) != 0)
11256 cum->words++;
11258 cum->intoffset = 0;
11259 cum->use_stack = 0;
11260 cum->named = named;
11262 /* Put entries into rvec[] for individual FP and vector fields, and
11263 for the chunks of memory that go in int regs. Note we start at
11264 element 1; 0 is reserved for an indication of using memory, and
11265 may or may not be filled in below. */
11266 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11267 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11269 /* If any part of the struct went on the stack put all of it there.
11270 This hack is because the generic code for
11271 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11272 parts of the struct are not at the beginning. */
11273 if (cum->use_stack)
11275 if (retval)
11276 return NULL_RTX; /* doesn't go in registers at all */
11277 kbase = 0;
11278 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11280 if (k > 1 || cum->use_stack)
11281 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11282 else
11283 return NULL_RTX;
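/* An illustrative sketch (assumed values, not a recorded trace): for
   a hypothetical Darwin64 argument

     struct { double d; int i; }

   with no preceding arguments, the recursion above puts D in the
   first FPR and flushes the trailing int chunk to a GPR, yielding
   roughly

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])

   where each const_int is the byte offset of the piece within the
   block.  Exact register numbers and chunk modes depend on CUM.  */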
11286 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11288 static rtx
11289 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11290 int align_words)
11292 int n_units;
11293 int i, k;
11294 rtx rvec[GP_ARG_NUM_REG + 1];
11296 if (align_words >= GP_ARG_NUM_REG)
11297 return NULL_RTX;
11299 n_units = rs6000_arg_size (mode, type);
11301 /* Optimize the simple case where the arg fits in one gpr, except in
11302 the case of BLKmode due to assign_parms assuming that registers are
11303 BITS_PER_WORD wide. */
11304 if (n_units == 0
11305 || (n_units == 1 && mode != BLKmode))
11306 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11308 k = 0;
11309 if (align_words + n_units > GP_ARG_NUM_REG)
11310 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11311 using a magic NULL_RTX component.
11312 This is not strictly correct. Only some of the arg belongs in
11313 memory, not all of it. However, the normal scheme using
11314 function_arg_partial_nregs can result in unusual subregs, e.g.
11315 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11316 store the whole arg to memory is often more efficient than code
11317 to store pieces, and we know that space is available in the right
11318 place for the whole arg. */
11319 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11321 i = 0;
11324 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11325 rtx off = GEN_INT (i++ * 4);
11326 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11328 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11330 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
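/* An illustrative sketch (assumed starting position): with -m32 on a
   64-bit processor, a DFmode argument landing at align_words == 6 is
   described as two SImode pieces,

     (parallel:DF [(expr_list (reg:SI r9)  (const_int 0))
                   (expr_list (reg:SI r10) (const_int 4))])

   and an argument starting in the last GPR would instead get a
   leading (NULL_RTX, 0) element to say the remainder is in memory.  */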
11333 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11334 but must also be copied into the parameter save area starting at
11335 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11336 to the GPRs and/or memory. Return the number of elements used. */
11338 static int
11339 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11340 int align_words, rtx *rvec)
11342 int k = 0;
11344 if (align_words < GP_ARG_NUM_REG)
11346 int n_words = rs6000_arg_size (mode, type);
11348 if (align_words + n_words > GP_ARG_NUM_REG
11349 || mode == BLKmode
11350 || (TARGET_32BIT && TARGET_POWERPC64))
11352 /* If this is partially on the stack, then we only
11353 include the portion actually in registers here. */
11354 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11355 int i = 0;
11357 if (align_words + n_words > GP_ARG_NUM_REG)
11359 /* Not all of the arg fits in gprs. Say that it goes in memory
11360 too, using a magic NULL_RTX component. Also see comment in
11361 rs6000_mixed_function_arg for why the normal
11362 function_arg_partial_nregs scheme doesn't work in this case. */
11363 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11368 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11369 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11370 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11372 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11374 else
11376 /* The whole arg fits in gprs. */
11377 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11378 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11381 else
11383 /* It's entirely in memory. */
11384 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11387 return k;
11390 /* RVEC is a vector of K components of an argument of mode MODE.
11391 Construct the final function_arg return value from it. */
11393 static rtx
11394 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11396 gcc_assert (k >= 1);
11398 /* Avoid returning a PARALLEL in the trivial cases. */
11399 if (k == 1)
11401 if (XEXP (rvec[0], 0) == NULL_RTX)
11402 return NULL_RTX;
11404 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11405 return XEXP (rvec[0], 0);
11408 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11411 /* Determine where to put an argument to a function.
11412 Value is zero to push the argument on the stack,
11413 or a hard register in which to store the argument.
11415 MODE is the argument's machine mode.
11416 TYPE is the data type of the argument (as a tree).
11417 This is null for libcalls where that information may
11418 not be available.
11419 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11420 the preceding args and about the function being called. It is
11421 not modified in this routine.
11422 NAMED is nonzero if this argument is a named parameter
11423 (otherwise it is an extra parameter matching an ellipsis).
11425 On RS/6000 the first eight words of non-FP are normally in registers
11426 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11427 Under V.4, the first 8 FP args are in registers.
11429 If this is floating-point and no prototype is specified, we use
11430 both an FP and integer register (or possibly FP reg and stack). Library
11431 functions (when CALL_LIBCALL is set) always have the proper types for args,
11432 so we can pass the FP value in just one register. emit_library_call
11433 doesn't support PARALLEL anyway.
11435 Note that for args passed by reference, function_arg will be called
11436 with MODE and TYPE set to that of the pointer to the arg, not the arg
11437 itself. */
11439 static rtx
11440 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11441 const_tree type, bool named)
11443 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11444 enum rs6000_abi abi = DEFAULT_ABI;
11445 machine_mode elt_mode;
11446 int n_elts;
11448 /* Return a marker to indicate whether CR1 needs to set or clear the
11449 bit that V.4 uses to say fp args were passed in registers.
11450 Assume that we don't need the marker for software floating point,
11451 or compiler generated library calls. */
11452 if (mode == VOIDmode)
11454 if (abi == ABI_V4
11455 && (cum->call_cookie & CALL_LIBCALL) == 0
11456 && (cum->stdarg
11457 || (cum->nargs_prototype < 0
11458 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11459 && TARGET_HARD_FLOAT)
11460 return GEN_INT (cum->call_cookie
11461 | ((cum->fregno == FP_ARG_MIN_REG)
11462 ? CALL_V4_SET_FP_ARGS
11463 : CALL_V4_CLEAR_FP_ARGS));
11465 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11468 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11470 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11472 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11473 if (rslt != NULL_RTX)
11474 return rslt;
11475 /* Else fall through to usual handling. */
11478 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11480 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11481 rtx r, off;
11482 int i, k = 0;
11484 /* Do we also need to pass this argument in the parameter save area?
11485 Library support functions for IEEE 128-bit are assumed to not need the
11486 value passed both in GPRs and in vector registers. */
11487 if (TARGET_64BIT && !cum->prototype
11488 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11490 int align_words = ROUND_UP (cum->words, 2);
11491 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11494 /* Describe where this argument goes in the vector registers. */
11495 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11497 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11498 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11499 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11502 return rs6000_finish_function_arg (mode, rvec, k);
11504 else if (TARGET_ALTIVEC_ABI
11505 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11506 || (type && TREE_CODE (type) == VECTOR_TYPE
11507 && int_size_in_bytes (type) == 16)))
11509 if (named || abi == ABI_V4)
11510 return NULL_RTX;
11511 else
11513 /* Vector parameters to varargs functions under AIX or Darwin
11514 get passed in memory and possibly also in GPRs. */
11515 int align, align_words, n_words;
11516 machine_mode part_mode;
11518 /* Vector parameters must be 16-byte aligned. In 32-bit
11519 mode this means we need to take into account the offset
11520 to the parameter save area. In 64-bit mode, they just
11521 have to start on an even word, since the parameter save
11522 area is 16-byte aligned. */
11523 if (TARGET_32BIT)
11524 align = -(rs6000_parm_offset () + cum->words) & 3;
11525 else
11526 align = cum->words & 1;
11527 align_words = cum->words + align;
11529 /* Out of registers? Memory, then. */
11530 if (align_words >= GP_ARG_NUM_REG)
11531 return NULL_RTX;
11533 if (TARGET_32BIT && TARGET_POWERPC64)
11534 return rs6000_mixed_function_arg (mode, type, align_words);
11536 /* The vector value goes in GPRs. Only the part of the
11537 value in GPRs is reported here. */
11538 part_mode = mode;
11539 n_words = rs6000_arg_size (mode, type);
11540 if (align_words + n_words > GP_ARG_NUM_REG)
11541 /* Fortunately, there are only two possibilities, the value
11542 is either wholly in GPRs or half in GPRs and half not. */
11543 part_mode = DImode;
11545 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11549 else if (abi == ABI_V4)
11551 if (abi_v4_pass_in_fpr (mode, named))
11553 /* _Decimal128 must use an even/odd register pair. This assumes
11554 that the register number is odd when fregno is odd. */
11555 if (mode == TDmode && (cum->fregno % 2) == 1)
11556 cum->fregno++;
11558 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11559 <= FP_ARG_V4_MAX_REG)
11560 return gen_rtx_REG (mode, cum->fregno);
11561 else
11562 return NULL_RTX;
11564 else
11566 int n_words = rs6000_arg_size (mode, type);
11567 int gregno = cum->sysv_gregno;
11569 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10),
11570 as is any other 2-word item such as complex int, due to a
11571 historical mistake. */
11572 if (n_words == 2)
11573 gregno += (1 - gregno) & 1;
11575 /* Multi-reg args are not split between registers and stack. */
11576 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11577 return NULL_RTX;
11579 if (TARGET_32BIT && TARGET_POWERPC64)
11580 return rs6000_mixed_function_arg (mode, type,
11581 gregno - GP_ARG_MIN_REG);
11582 return gen_rtx_REG (mode, gregno);
11585 else
11587 int align_words = rs6000_parm_start (mode, type, cum->words);
11589 /* _Decimal128 must be passed in an even/odd float register pair.
11590 This assumes that the register number is odd when fregno is odd. */
11591 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11592 cum->fregno++;
11594 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11595 && !(TARGET_AIX && !TARGET_ELF
11596 && type != NULL && AGGREGATE_TYPE_P (type)))
11598 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11599 rtx r, off;
11600 int i, k = 0;
11601 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11602 int fpr_words;
11604 /* Do we also need to pass this argument in the parameter
11605 save area? */
11606 if (type && (cum->nargs_prototype <= 0
11607 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11608 && TARGET_XL_COMPAT
11609 && align_words >= GP_ARG_NUM_REG)))
11610 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11612 /* Describe where this argument goes in the fprs. */
11613 for (i = 0; i < n_elts
11614 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11616 /* Check if the argument is split over registers and memory.
11617 This can only ever happen for long double or _Decimal128;
11618 complex types are handled via split_complex_arg. */
11619 machine_mode fmode = elt_mode;
11620 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11622 gcc_assert (FLOAT128_2REG_P (fmode));
11623 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11626 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11627 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11628 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11631 /* If there were not enough FPRs to hold the argument, the rest
11632 usually goes into memory. However, if the current position
11633 is still within the register parameter area, a portion may
11634 actually have to go into GPRs.
11636 Note that it may happen that the portion of the argument
11637 passed in the first "half" of the first GPR was already
11638 passed in the last FPR as well.
11640 For unnamed arguments, we already set up GPRs to cover the
11641 whole argument in rs6000_psave_function_arg, so there is
11642 nothing further to do at this point. */
11643 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11644 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11645 && cum->nargs_prototype > 0)
11647 static bool warned;
11649 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11650 int n_words = rs6000_arg_size (mode, type);
11652 align_words += fpr_words;
11653 n_words -= fpr_words;
11657 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11658 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11659 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11661 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11663 if (!warned && warn_psabi)
11665 warned = true;
11666 inform (input_location,
11667 "the ABI of passing homogeneous %<float%> aggregates"
11668 " has changed in GCC 5");
11672 return rs6000_finish_function_arg (mode, rvec, k);
11674 else if (align_words < GP_ARG_NUM_REG)
11676 if (TARGET_32BIT && TARGET_POWERPC64)
11677 return rs6000_mixed_function_arg (mode, type, align_words);
11679 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11681 else
11682 return NULL_RTX;
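/* An illustrative example (hypothetical type; assumes ELFv2, hard
   float, a prototyped call, and no preceding arguments):

     struct hfa { float a, b, c; };
     void f (struct hfa x);

   rs6000_discover_homogeneous_aggregate finds three SFmode elements,
   so the code above returns a PARALLEL placing them in f1, f2 and f3
   at byte offsets 0, 4 and 8 within the aggregate.  */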
11686 /* For an arg passed partly in registers and partly in memory, this is
11687 the number of bytes passed in registers. For args passed entirely in
11688 registers or entirely in memory, zero. When an arg is described by a
11689 PARALLEL, perhaps using more than one register type, this function
11690 returns the number of bytes used by the first element of the PARALLEL. */
11692 static int
11693 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11694 tree type, bool named)
11696 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11697 bool passed_in_gprs = true;
11698 int ret = 0;
11699 int align_words;
11700 machine_mode elt_mode;
11701 int n_elts;
11703 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11705 if (DEFAULT_ABI == ABI_V4)
11706 return 0;
11708 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11710 /* If we are passing this arg in the fixed parameter save area (gprs or
11711 memory) as well as VRs, we do not use the partial bytes mechanism;
11712 instead, rs6000_function_arg will return a PARALLEL including a memory
11713 element as necessary. Library support functions for IEEE 128-bit are
11714 assumed to not need the value passed both in GPRs and in vector
11715 registers. */
11716 if (TARGET_64BIT && !cum->prototype
11717 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11718 return 0;
11720 /* Otherwise, we pass in VRs only. Check for partial copies. */
11721 passed_in_gprs = false;
11722 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11723 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11726 /* In this complicated case we just disable the partial_nregs code. */
11727 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11728 return 0;
11730 align_words = rs6000_parm_start (mode, type, cum->words);
11732 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11733 && !(TARGET_AIX && !TARGET_ELF
11734 && type != NULL && AGGREGATE_TYPE_P (type)))
11736 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11738 /* If we are passing this arg in the fixed parameter save area
11739 (gprs or memory) as well as FPRs, we do not use the partial
11740 bytes mechanism; instead, rs6000_function_arg will return a
11741 PARALLEL including a memory element as necessary. */
11742 if (type
11743 && (cum->nargs_prototype <= 0
11744 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11745 && TARGET_XL_COMPAT
11746 && align_words >= GP_ARG_NUM_REG)))
11747 return 0;
11749 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11750 passed_in_gprs = false;
11751 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11753 /* Compute number of bytes / words passed in FPRs. If there
11754 is still space available in the register parameter area
11755 *after* that amount, a part of the argument will be passed
11756 in GPRs. In that case, the total amount passed in any
11757 registers is equal to the amount that would have been passed
11758 in GPRs if everything were passed there, so we fall back to
11759 the GPR code below to compute the appropriate value. */
11760 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11761 * MIN (8, GET_MODE_SIZE (elt_mode)));
11762 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11764 if (align_words + fpr_words < GP_ARG_NUM_REG)
11765 passed_in_gprs = true;
11766 else
11767 ret = fpr;
11771 if (passed_in_gprs
11772 && align_words < GP_ARG_NUM_REG
11773 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11774 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11776 if (ret != 0 && TARGET_DEBUG_ARG)
11777 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11779 return ret;
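/* A worked example (assumed CUM state): under the 64-bit AIX/ELFv2
   layout, take a 24-byte aggregate of integers whose first doubleword
   lands at align_words == 7 of the 8 GPR argument slots.  Only that
   doubleword fits in registers, so the code above returns
   (8 - 7) * 8 == 8 partial bytes; the remaining 16 bytes go to the
   parameter save area.  */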
11782 /* A C expression that indicates when an argument must be passed by
11783 reference. If nonzero for an argument, a copy of that argument is
11784 made in memory and a pointer to the argument is passed instead of
11785 the argument itself. The pointer is passed in whatever way is
11786 appropriate for passing a pointer to that type.
11788 Under V.4, aggregates and long double are passed by reference.
11790 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11791 reference unless the AltiVec vector extension ABI is in force.
11793 As an extension to all ABIs, variable sized types are passed by
11794 reference. */
11796 static bool
11797 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11798 machine_mode mode, const_tree type,
11799 bool named ATTRIBUTE_UNUSED)
11801 if (!type)
11802 return 0;
11804 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11805 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11807 if (TARGET_DEBUG_ARG)
11808 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11809 return 1;
11812 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11814 if (TARGET_DEBUG_ARG)
11815 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11816 return 1;
11819 if (int_size_in_bytes (type) < 0)
11821 if (TARGET_DEBUG_ARG)
11822 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11823 return 1;
11826 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11827 modes only exist for GCC vector types if -maltivec. */
11828 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11830 if (TARGET_DEBUG_ARG)
11831 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11832 return 1;
11835 /* Pass synthetic vectors in memory. */
11836 if (TREE_CODE (type) == VECTOR_TYPE
11837 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11839 static bool warned_for_pass_big_vectors = false;
11840 if (TARGET_DEBUG_ARG)
11841 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11842 if (!warned_for_pass_big_vectors)
11844 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11845 "non-standard ABI extension with no compatibility "
11846 "guarantee");
11847 warned_for_pass_big_vectors = true;
11849 return 1;
11852 return 0;
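/* Illustration of the tests above (hypothetical declarations, 32-bit
   SVR4 rules):

     struct s { int x, y; };
     void f (struct s a);      - by reference: V4 aggregate
     void g (long double b);   - by reference when long double is
                                 IEEE 128-bit
     void h (double c);        - by value: plain scalar

   Variable-sized types and over-wide GCC vectors likewise take the
   by-reference path, the latter with a -Wpsabi warning.  */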
11855 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11856 already processed. Return true if the parameter must be passed
11857 (fully or partially) on the stack. */
11859 static bool
11860 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11862 machine_mode mode;
11863 int unsignedp;
11864 rtx entry_parm;
11866 /* Catch errors. */
11867 if (type == NULL || type == error_mark_node)
11868 return true;
11870 /* Handle types with no storage requirement. */
11871 if (TYPE_MODE (type) == VOIDmode)
11872 return false;
11874 /* Handle complex types. */
11875 if (TREE_CODE (type) == COMPLEX_TYPE)
11876 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11877 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11879 /* Handle transparent aggregates. */
11880 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11881 && TYPE_TRANSPARENT_AGGR (type))
11882 type = TREE_TYPE (first_field (type));
11884 /* See if this arg was passed by invisible reference. */
11885 if (pass_by_reference (get_cumulative_args (args_so_far),
11886 TYPE_MODE (type), type, true))
11887 type = build_pointer_type (type);
11889 /* Find mode as it is passed by the ABI. */
11890 unsignedp = TYPE_UNSIGNED (type);
11891 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11893 /* If we must pass in stack, we need a stack. */
11894 if (rs6000_must_pass_in_stack (mode, type))
11895 return true;
11897 /* If there is no incoming register, we need a stack. */
11898 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11899 if (entry_parm == NULL)
11900 return true;
11902 /* Likewise if we need to pass both in registers and on the stack. */
11903 if (GET_CODE (entry_parm) == PARALLEL
11904 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11905 return true;
11907 /* Also true if we're partially in registers and partially not. */
11908 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
11909 return true;
11911 /* Update info on where next arg arrives in registers. */
11912 rs6000_function_arg_advance (args_so_far, mode, type, true);
11913 return false;
11916 /* Return true if FUN has no prototype, has a variable argument
11917 list, or passes any parameter in memory. */
11919 static bool
11920 rs6000_function_parms_need_stack (tree fun, bool incoming)
11922 tree fntype, result;
11923 CUMULATIVE_ARGS args_so_far_v;
11924 cumulative_args_t args_so_far;
11926 if (!fun)
11927 /* Must be a libcall, all of which only use reg parms. */
11928 return false;
11930 fntype = fun;
11931 if (!TYPE_P (fun))
11932 fntype = TREE_TYPE (fun);
11934 /* Varargs functions need the parameter save area. */
11935 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
11936 return true;
11938 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
11939 args_so_far = pack_cumulative_args (&args_so_far_v);
11941 /* When incoming, we will have been passed the function decl.
11942 It is necessary to use the decl to handle K&R style functions,
11943 where TYPE_ARG_TYPES may not be available. */
11944 if (incoming)
11946 gcc_assert (DECL_P (fun));
11947 result = DECL_RESULT (fun);
11949 else
11950 result = TREE_TYPE (fntype);
11952 if (result && aggregate_value_p (result, fntype))
11954 if (!TYPE_P (result))
11955 result = TREE_TYPE (result);
11956 result = build_pointer_type (result);
11957 rs6000_parm_needs_stack (args_so_far, result);
11960 if (incoming)
11962 tree parm;
11964 for (parm = DECL_ARGUMENTS (fun);
11965 parm && parm != void_list_node;
11966 parm = TREE_CHAIN (parm))
11967 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
11968 return true;
11970 else
11972 function_args_iterator args_iter;
11973 tree arg_type;
11975 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
11976 if (rs6000_parm_needs_stack (args_so_far, arg_type))
11977 return true;
11980 return false;
11983 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
11984 usually a constant depending on the ABI. However, in the ELFv2 ABI
11985 the register parameter area is optional when calling a function that
11986 has a prototype in scope, has no variable argument list, and passes
11987 all parameters in registers. */
11989 int
11990 rs6000_reg_parm_stack_space (tree fun, bool incoming)
11992 int reg_parm_stack_space;
11994 switch (DEFAULT_ABI)
11996 default:
11997 reg_parm_stack_space = 0;
11998 break;
12000 case ABI_AIX:
12001 case ABI_DARWIN:
12002 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12003 break;
12005 case ABI_ELFv2:
12006 /* ??? Recomputing this every time is a bit expensive. Is there
12007 a place to cache this information? */
12008 if (rs6000_function_parms_need_stack (fun, incoming))
12009 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12010 else
12011 reg_parm_stack_space = 0;
12012 break;
12015 return reg_parm_stack_space;
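/* Example: under ABI_AIX with -m64 the result is always 64 bytes
   (8 GPRs x 8 bytes).  Under ABI_ELFv2, a hypothetical call

     extern int f (int, int);
     f (1, 2);

   passes everything in registers with a prototype in scope, so 0 is
   returned; adding an ellipsis to the prototype restores the full
   64-byte area.  */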
12018 static void
12019 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12021 int i;
12022 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12024 if (nregs == 0)
12025 return;
12027 for (i = 0; i < nregs; i++)
12029 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12030 if (reload_completed)
12032 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12033 tem = NULL_RTX;
12034 else
12035 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12036 i * GET_MODE_SIZE (reg_mode));
12038 else
12039 tem = replace_equiv_address (tem, XEXP (tem, 0));
12041 gcc_assert (tem);
12043 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12047 /* Perform any actions needed for a function that is receiving a
12048 variable number of arguments.
12050 CUM is as above.
12052 MODE and TYPE are the mode and type of the current parameter.
12054 PRETEND_SIZE is a variable that should be set to the amount of stack
12055 that must be pushed by the prolog to pretend that our caller pushed it.
12058 Normally, this macro will push all remaining incoming registers on the
12059 stack and set PRETEND_SIZE to the length of the registers pushed. */
12061 static void
12062 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12063 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12064 int no_rtl)
12066 CUMULATIVE_ARGS next_cum;
12067 int reg_size = TARGET_32BIT ? 4 : 8;
12068 rtx save_area = NULL_RTX, mem;
12069 int first_reg_offset;
12070 alias_set_type set;
12072 /* Skip the last named argument. */
12073 next_cum = *get_cumulative_args (cum);
12074 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12076 if (DEFAULT_ABI == ABI_V4)
12078 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12080 if (! no_rtl)
12082 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12083 HOST_WIDE_INT offset = 0;
12085 /* Try to optimize the size of the varargs save area.
12086 The ABI requires that ap.reg_save_area is doubleword
12087 aligned, but we don't need to allocate space for all
12088 the bytes, only for those into which we will
12089 actually save anything. */
12090 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12091 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12092 if (TARGET_HARD_FLOAT
12093 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12094 && cfun->va_list_fpr_size)
12096 if (gpr_reg_num)
12097 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12098 * UNITS_PER_FP_WORD;
12099 if (cfun->va_list_fpr_size
12100 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12101 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12102 else
12103 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12104 * UNITS_PER_FP_WORD;
12106 if (gpr_reg_num)
12108 offset = -((first_reg_offset * reg_size) & ~7);
12109 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12111 gpr_reg_num = cfun->va_list_gpr_size;
12112 if (reg_size == 4 && (first_reg_offset & 1))
12113 gpr_reg_num++;
12115 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12117 else if (fpr_size)
12118 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12119 * UNITS_PER_FP_WORD
12120 - (int) (GP_ARG_NUM_REG * reg_size);
12122 if (gpr_size + fpr_size)
12124 rtx reg_save_area
12125 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12126 gcc_assert (MEM_P (reg_save_area));
12127 reg_save_area = XEXP (reg_save_area, 0);
12128 if (GET_CODE (reg_save_area) == PLUS)
12130 gcc_assert (XEXP (reg_save_area, 0)
12131 == virtual_stack_vars_rtx);
12132 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12133 offset += INTVAL (XEXP (reg_save_area, 1));
12135 else
12136 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12139 cfun->machine->varargs_save_offset = offset;
12140 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12143 else
12145 first_reg_offset = next_cum.words;
12146 save_area = crtl->args.internal_arg_pointer;
12148 if (targetm.calls.must_pass_in_stack (mode, type))
12149 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12152 set = get_varargs_alias_set ();
12153 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12154 && cfun->va_list_gpr_size)
12156 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12158 if (va_list_gpr_counter_field)
12159 /* V4 va_list_gpr_size counts number of registers needed. */
12160 n_gpr = cfun->va_list_gpr_size;
12161 else
12162 /* char * va_list instead counts number of bytes needed. */
12163 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12165 if (nregs > n_gpr)
12166 nregs = n_gpr;
12168 mem = gen_rtx_MEM (BLKmode,
12169 plus_constant (Pmode, save_area,
12170 first_reg_offset * reg_size));
12171 MEM_NOTRAP_P (mem) = 1;
12172 set_mem_alias_set (mem, set);
12173 set_mem_align (mem, BITS_PER_WORD);
12175 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12176 nregs);
12179 /* Save FP registers if needed. */
12180 if (DEFAULT_ABI == ABI_V4
12181 && TARGET_HARD_FLOAT
12182 && ! no_rtl
12183 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12184 && cfun->va_list_fpr_size)
12186 int fregno = next_cum.fregno, nregs;
12187 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12188 rtx lab = gen_label_rtx ();
12189 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12190 * UNITS_PER_FP_WORD);
12192 emit_jump_insn
12193 (gen_rtx_SET (pc_rtx,
12194 gen_rtx_IF_THEN_ELSE (VOIDmode,
12195 gen_rtx_NE (VOIDmode, cr1,
12196 const0_rtx),
12197 gen_rtx_LABEL_REF (VOIDmode, lab),
12198 pc_rtx)));
12200 for (nregs = 0;
12201 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12202 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12204 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12205 plus_constant (Pmode, save_area, off));
12206 MEM_NOTRAP_P (mem) = 1;
12207 set_mem_alias_set (mem, set);
12208 set_mem_align (mem, GET_MODE_ALIGNMENT (
12209 TARGET_HARD_FLOAT ? DFmode : SFmode));
12210 emit_move_insn (mem, gen_rtx_REG (
12211 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12214 emit_label (lab);
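/* In outline, for 32-bit V4 and assuming no named FP arguments, the
   code above emits (schematically, not the literal RTL) a block store
   of the unnamed GPRs into the save area, then

     if (the CR1 bit set at the call site says no FP args)
       goto Lskip;
     *(double *) (sav + 32 + 0) = f1;    - 32 == GP_ARG_NUM_REG * 4
     *(double *) (sav + 32 + 8) = f2;
     ...
   Lskip:

   where the CR1 bit is the one communicated via CALL_V4_SET_FP_ARGS
   and CALL_V4_CLEAR_FP_ARGS in rs6000_function_arg.  */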
12218 /* Create the va_list data type. */
12220 static tree
12221 rs6000_build_builtin_va_list (void)
12223 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12225 /* For AIX, prefer 'char *' because that's what the system
12226 header files like. */
12227 if (DEFAULT_ABI != ABI_V4)
12228 return build_pointer_type (char_type_node);
12230 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12231 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12232 get_identifier ("__va_list_tag"), record);
12234 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12235 unsigned_char_type_node);
12236 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12237 unsigned_char_type_node);
12238 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12239 every user file. */
12240 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12241 get_identifier ("reserved"), short_unsigned_type_node);
12242 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12243 get_identifier ("overflow_arg_area"),
12244 ptr_type_node);
12245 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12246 get_identifier ("reg_save_area"),
12247 ptr_type_node);
12249 va_list_gpr_counter_field = f_gpr;
12250 va_list_fpr_counter_field = f_fpr;
12252 DECL_FIELD_CONTEXT (f_gpr) = record;
12253 DECL_FIELD_CONTEXT (f_fpr) = record;
12254 DECL_FIELD_CONTEXT (f_res) = record;
12255 DECL_FIELD_CONTEXT (f_ovf) = record;
12256 DECL_FIELD_CONTEXT (f_sav) = record;
12258 TYPE_STUB_DECL (record) = type_decl;
12259 TYPE_NAME (record) = type_decl;
12260 TYPE_FIELDS (record) = f_gpr;
12261 DECL_CHAIN (f_gpr) = f_fpr;
12262 DECL_CHAIN (f_fpr) = f_res;
12263 DECL_CHAIN (f_res) = f_ovf;
12264 DECL_CHAIN (f_ovf) = f_sav;
12266 layout_type (record);
12268 /* The correct type is an array type of one element. */
12269 return build_array_type (record, build_index_type (size_zero_node));
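/* The record built above corresponds roughly to this C sketch (the
   exact layout is whatever layout_type produces):

     struct __va_list_tag
     {
       unsigned char gpr;          - count of GPR arg registers used
       unsigned char fpr;          - count of FPR arg registers used
       unsigned short reserved;    - named padding, for -Wpadded
       void *overflow_arg_area;    - next argument on the stack
       void *reg_save_area;        - base of the register save area
     };
     typedef struct __va_list_tag va_list[1];   - one-element array
*/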
12272 /* Implement va_start. */
12274 static void
12275 rs6000_va_start (tree valist, rtx nextarg)
12277 HOST_WIDE_INT words, n_gpr, n_fpr;
12278 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12279 tree gpr, fpr, ovf, sav, t;
12281 /* Only SVR4 needs something special. */
12282 if (DEFAULT_ABI != ABI_V4)
12284 std_expand_builtin_va_start (valist, nextarg);
12285 return;
12288 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12289 f_fpr = DECL_CHAIN (f_gpr);
12290 f_res = DECL_CHAIN (f_fpr);
12291 f_ovf = DECL_CHAIN (f_res);
12292 f_sav = DECL_CHAIN (f_ovf);
12294 valist = build_simple_mem_ref (valist);
12295 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12296 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12297 f_fpr, NULL_TREE);
12298 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12299 f_ovf, NULL_TREE);
12300 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12301 f_sav, NULL_TREE);
12303 /* Count number of gp and fp argument registers used. */
12304 words = crtl->args.info.words;
12305 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12306 GP_ARG_NUM_REG);
12307 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12308 FP_ARG_NUM_REG);
12310 if (TARGET_DEBUG_ARG)
12311 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12312 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12313 words, n_gpr, n_fpr);
12315 if (cfun->va_list_gpr_size)
12317 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12318 build_int_cst (NULL_TREE, n_gpr));
12319 TREE_SIDE_EFFECTS (t) = 1;
12320 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12323 if (cfun->va_list_fpr_size)
12325 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12326 build_int_cst (NULL_TREE, n_fpr));
12327 TREE_SIDE_EFFECTS (t) = 1;
12328 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12330 #ifdef HAVE_AS_GNU_ATTRIBUTE
12331 if (call_ABI_of_interest (cfun->decl))
12332 rs6000_passes_float = true;
12333 #endif
12336 /* Find the overflow area. */
12337 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12338 if (words != 0)
12339 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12340 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12341 TREE_SIDE_EFFECTS (t) = 1;
12342 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12344 /* If there were no va_arg invocations, don't set up the register
12345 save area. */
12346 if (!cfun->va_list_gpr_size
12347 && !cfun->va_list_fpr_size
12348 && n_gpr < GP_ARG_NUM_REG
12349 && n_fpr < FP_ARG_V4_MAX_REG)
12350 return;
12352 /* Find the register save area. */
12353 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12354 if (cfun->machine->varargs_save_offset)
12355 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12356 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12357 TREE_SIDE_EFFECTS (t) = 1;
12358 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
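/* Summarizing the assignments above as pseudo-C for a 32-bit V4
   function (right-hand names are the internal rtxes involved):

     ap->gpr = n_gpr;                    - GPR arg registers consumed
     ap->fpr = n_fpr;                    - FPR arg registers consumed
     ap->overflow_arg_area
       = internal_arg_pointer + words * 4;
     ap->reg_save_area
       = virtual_stack_vars_rtx + varargs_save_offset;
*/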
12361 /* Implement va_arg. */
12363 static tree
12364 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12365 gimple_seq *post_p)
12367 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12368 tree gpr, fpr, ovf, sav, reg, t, u;
12369 int size, rsize, n_reg, sav_ofs, sav_scale;
12370 tree lab_false, lab_over, addr;
12371 int align;
12372 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12373 int regalign = 0;
12374 gimple *stmt;
12376 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12378 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12379 return build_va_arg_indirect_ref (t);
12382 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12383 earlier version of gcc, with the property that it always applied alignment
12384 adjustments to the va-args (even for zero-sized types). The cheapest way
12385 to deal with this is to replicate the effect of the part of
12386 std_gimplify_va_arg_expr that carries out the align adjust, for the
12387 relevant case.
12388 We don't need to check for pass-by-reference because of the test above.
12389 We can return a simplified answer, since we know there's no offset to add. */
12391 if (((TARGET_MACHO
12392 && rs6000_darwin64_abi)
12393 || DEFAULT_ABI == ABI_ELFv2
12394 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12395 && integer_zerop (TYPE_SIZE (type)))
12397 unsigned HOST_WIDE_INT align, boundary;
12398 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12399 align = PARM_BOUNDARY / BITS_PER_UNIT;
12400 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12401 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12402 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12403 boundary /= BITS_PER_UNIT;
12404 if (boundary > align)
12406 tree t;
12407 /* This updates arg ptr by the amount that would be necessary
12408 to align the zero-sized (but not zero-alignment) item. */
12409 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12410 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12411 gimplify_and_add (t, pre_p);
12413 t = fold_convert (sizetype, valist_tmp);
12414 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12415 fold_convert (TREE_TYPE (valist),
12416 fold_build2 (BIT_AND_EXPR, sizetype, t,
12417 size_int (-boundary))));
12418 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12419 gimplify_and_add (t, pre_p);
12421 /* Since it is zero-sized there's no increment for the item itself. */
12422 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12423 return build_va_arg_indirect_ref (valist_tmp);
12426 if (DEFAULT_ABI != ABI_V4)
12428 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12430 tree elem_type = TREE_TYPE (type);
12431 machine_mode elem_mode = TYPE_MODE (elem_type);
12432 int elem_size = GET_MODE_SIZE (elem_mode);
12434 if (elem_size < UNITS_PER_WORD)
12436 tree real_part, imag_part;
12437 gimple_seq post = NULL;
12439 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12440 &post);
12441 /* Copy the value into a temporary, lest the formal temporary
12442 be reused out from under us. */
12443 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12444 gimple_seq_add_seq (pre_p, post);
12446 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12447 post_p);
12449 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12453 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12456 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12457 f_fpr = DECL_CHAIN (f_gpr);
12458 f_res = DECL_CHAIN (f_fpr);
12459 f_ovf = DECL_CHAIN (f_res);
12460 f_sav = DECL_CHAIN (f_ovf);
12462 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12463 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12464 f_fpr, NULL_TREE);
12465 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12466 f_ovf, NULL_TREE);
12467 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12468 f_sav, NULL_TREE);
12470 size = int_size_in_bytes (type);
12471 rsize = (size + 3) / 4;
12472 int pad = 4 * rsize - size;
12473 align = 1;
12475 machine_mode mode = TYPE_MODE (type);
12476 if (abi_v4_pass_in_fpr (mode, false))
12478 /* FP args go in FP registers, if present. */
12479 reg = fpr;
12480 n_reg = (size + 7) / 8;
12481 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12482 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12483 if (mode != SFmode && mode != SDmode)
12484 align = 8;
12486 else
12488 /* Otherwise into GP registers. */
12489 reg = gpr;
12490 n_reg = rsize;
12491 sav_ofs = 0;
12492 sav_scale = 4;
12493 if (n_reg == 2)
12494 align = 8;
12497 /* Pull the value out of the saved registers.... */
12499 lab_over = NULL;
12500 addr = create_tmp_var (ptr_type_node, "addr");
12502 /* AltiVec vectors never go in registers when -mabi=altivec. */
12503 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12504 align = 16;
12505 else
12507 lab_false = create_artificial_label (input_location);
12508 lab_over = create_artificial_label (input_location);
12510 /* Long long is aligned in the registers, as is any other 2-gpr
12511 item such as complex int, due to a historical mistake. */
12512 u = reg;
12513 if (n_reg == 2 && reg == gpr)
12515 regalign = 1;
12516 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12517 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12518 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12519 unshare_expr (reg), u);
12521 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12522 reg number is 0 for f1, so we want to make it odd. */
12523 else if (reg == fpr && mode == TDmode)
12525 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12526 build_int_cst (TREE_TYPE (reg), 1));
12527 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12530 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12531 t = build2 (GE_EXPR, boolean_type_node, u, t);
12532 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12533 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12534 gimplify_and_add (t, pre_p);
12536 t = sav;
12537 if (sav_ofs)
12538 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12540 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12541 build_int_cst (TREE_TYPE (reg), n_reg));
12542 u = fold_convert (sizetype, u);
12543 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12544 t = fold_build_pointer_plus (t, u);
12546 /* _Decimal32 varargs are located in the second word of the 64-bit
12547 FP register for 32-bit binaries. */
12548 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12549 t = fold_build_pointer_plus_hwi (t, size);
12551 /* Args are passed right-aligned. */
12552 if (BYTES_BIG_ENDIAN)
12553 t = fold_build_pointer_plus_hwi (t, pad);
12555 gimplify_assign (addr, t, pre_p);
12557 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12559 stmt = gimple_build_label (lab_false);
12560 gimple_seq_add_stmt (pre_p, stmt);
12562 if ((n_reg == 2 && !regalign) || n_reg > 2)
12564 /* Ensure that we don't find any more args in regs.
12565 Alignment has been taken care of for the special cases. */
12566 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12570 /* ... otherwise out of the overflow area. */
12572 /* Care for on-stack alignment if needed. */
12573 t = ovf;
12574 if (align != 1)
12576 t = fold_build_pointer_plus_hwi (t, align - 1);
12577 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12578 build_int_cst (TREE_TYPE (t), -align));
12581 /* Args are passed right-aligned. */
12582 if (BYTES_BIG_ENDIAN)
12583 t = fold_build_pointer_plus_hwi (t, pad);
12585 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12587 gimplify_assign (unshare_expr (addr), t, pre_p);
12589 t = fold_build_pointer_plus_hwi (t, size);
12590 gimplify_assign (unshare_expr (ovf), t, pre_p);
12592 if (lab_over)
12594 stmt = gimple_build_label (lab_over);
12595 gimple_seq_add_stmt (pre_p, stmt);
12598 if (STRICT_ALIGNMENT
12599 && (TYPE_ALIGN (type)
12600 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12602 /* The value (of type complex double, for example) may not be
12603 aligned in memory in the saved registers, so copy via a
12604 temporary. (This is the same code as used for SPARC.) */
12605 tree tmp = create_tmp_var (type, "va_arg_tmp");
12606 tree dest_addr = build_fold_addr_expr (tmp);
12608 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12609 3, dest_addr, addr, size_int (rsize * 4));
12610 TREE_ADDRESSABLE (tmp) = 1;
12612 gimplify_and_add (copy, pre_p);
12613 addr = dest_addr;
12616 addr = fold_convert (ptrtype, addr);
12617 return build_va_arg_indirect_ref (addr);
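/* The V4 sequence gimplified above is, in outline (a pseudo-C
   approximation; 8 is the number of argument registers of the
   chosen kind):

     if (reg > 8 - n_reg)                    - out of registers
       goto lab_false;
     addr = sav + sav_ofs + reg * sav_scale;
     reg += n_reg;
     goto lab_over;
   lab_false:
     reg = 8;                                - stop register lookups
     ovf = (ovf + align - 1) & -align;       - on-stack alignment
     addr = ovf;
     ovf += size;
   lab_over:
     result = *(type *) addr;
*/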
12620 /* Builtins. */
12622 static void
12623 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12625 tree t;
12626 unsigned classify = rs6000_builtin_info[(int)code].attr;
12627 const char *attr_string = "";
12629 gcc_assert (name != NULL);
12630 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12632 if (rs6000_builtin_decls[(int)code])
12633 fatal_error (input_location,
12634 "internal error: builtin function %qs already processed",
12635 name);
12637 rs6000_builtin_decls[(int)code] = t =
12638 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12640 /* Set any special attributes. */
12641 if ((classify & RS6000_BTC_CONST) != 0)
12643 /* const function, function only depends on the inputs. */
12644 TREE_READONLY (t) = 1;
12645 TREE_NOTHROW (t) = 1;
12646 attr_string = ", const";
12648 else if ((classify & RS6000_BTC_PURE) != 0)
12650 /* pure function, function can read global memory, but does not set any
12651 external state. */
12652 DECL_PURE_P (t) = 1;
12653 TREE_NOTHROW (t) = 1;
12654 attr_string = ", pure";
12656 else if ((classify & RS6000_BTC_FP) != 0)
12658 /* Function is a math function. If rounding mode is on, then treat the
12659 function as not reading global memory, but it can have arbitrary side
12660 effects. If it is off, then assume the function is a const function.
12661 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12662 builtin-attribute.def that is used for the math functions. */
12663 TREE_NOTHROW (t) = 1;
12664 if (flag_rounding_math)
12666 DECL_PURE_P (t) = 1;
12667 DECL_IS_NOVOPS (t) = 1;
12668 attr_string = ", fp, pure";
12670 else
12672 TREE_READONLY (t) = 1;
12673 attr_string = ", fp, const";
12676 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12677 gcc_unreachable ();
12679 if (TARGET_DEBUG_BUILTIN)
12680 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12681 (int)code, name, attr_string);
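/* Typical use, schematically (the name, type node and enum value
   below are made up for illustration):

     def_builtin ("__builtin_example_vadd",
                  v4si_ftype_v4si_v4si,
                  EXAMPLE_BUILTIN_VADD);

   The RS6000_BTC_* bits in rs6000_builtin_info[code].attr then decide
   whether the new decl is marked const, pure, or FP-rounding
   sensitive.  */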
12684 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12686 #undef RS6000_BUILTIN_0
12687 #undef RS6000_BUILTIN_1
12688 #undef RS6000_BUILTIN_2
12689 #undef RS6000_BUILTIN_3
12690 #undef RS6000_BUILTIN_A
12691 #undef RS6000_BUILTIN_D
12692 #undef RS6000_BUILTIN_H
12693 #undef RS6000_BUILTIN_P
12694 #undef RS6000_BUILTIN_X
12696 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12697 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12698 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12699 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12700 { MASK, ICODE, NAME, ENUM },
12702 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12703 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12704 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12705 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12706 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12708 static const struct builtin_description bdesc_3arg[] =
12710 #include "rs6000-builtin.def"
12713 /* DST operations: void foo (void *, const int, const char). */
12715 #undef RS6000_BUILTIN_0
12716 #undef RS6000_BUILTIN_1
12717 #undef RS6000_BUILTIN_2
12718 #undef RS6000_BUILTIN_3
12719 #undef RS6000_BUILTIN_A
12720 #undef RS6000_BUILTIN_D
12721 #undef RS6000_BUILTIN_H
12722 #undef RS6000_BUILTIN_P
12723 #undef RS6000_BUILTIN_X
12725 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12726 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12727 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12728 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12729 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12730 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12731 { MASK, ICODE, NAME, ENUM },
12733 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12734 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12735 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12737 static const struct builtin_description bdesc_dst[] =
12739 #include "rs6000-builtin.def"
12742 /* Simple binary operations: VECc = foo (VECa, VECb). */
12744 #undef RS6000_BUILTIN_0
12745 #undef RS6000_BUILTIN_1
12746 #undef RS6000_BUILTIN_2
12747 #undef RS6000_BUILTIN_3
12748 #undef RS6000_BUILTIN_A
12749 #undef RS6000_BUILTIN_D
12750 #undef RS6000_BUILTIN_H
12751 #undef RS6000_BUILTIN_P
12752 #undef RS6000_BUILTIN_X
12754 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12755 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12756 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12757 { MASK, ICODE, NAME, ENUM },
12759 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12760 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12761 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12762 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12763 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12764 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12766 static const struct builtin_description bdesc_2arg[] =
12768 #include "rs6000-builtin.def"
12771 #undef RS6000_BUILTIN_0
12772 #undef RS6000_BUILTIN_1
12773 #undef RS6000_BUILTIN_2
12774 #undef RS6000_BUILTIN_3
12775 #undef RS6000_BUILTIN_A
12776 #undef RS6000_BUILTIN_D
12777 #undef RS6000_BUILTIN_H
12778 #undef RS6000_BUILTIN_P
12779 #undef RS6000_BUILTIN_X
12781 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12782 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12783 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12784 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12785 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12786 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12787 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12788 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12789 { MASK, ICODE, NAME, ENUM },
12791 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12793 /* AltiVec predicates. */
12795 static const struct builtin_description bdesc_altivec_preds[] =
12797 #include "rs6000-builtin.def"
12800 /* ABS* operations. */
12802 #undef RS6000_BUILTIN_0
12803 #undef RS6000_BUILTIN_1
12804 #undef RS6000_BUILTIN_2
12805 #undef RS6000_BUILTIN_3
12806 #undef RS6000_BUILTIN_A
12807 #undef RS6000_BUILTIN_D
12808 #undef RS6000_BUILTIN_H
12809 #undef RS6000_BUILTIN_P
12810 #undef RS6000_BUILTIN_X
12812 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12813 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12814 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12815 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12816 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12817 { MASK, ICODE, NAME, ENUM },
12819 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12820 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12821 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12822 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12824 static const struct builtin_description bdesc_abs[] =
12826 #include "rs6000-builtin.def"
12829 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12830 foo (VECa). */
12832 #undef RS6000_BUILTIN_0
12833 #undef RS6000_BUILTIN_1
12834 #undef RS6000_BUILTIN_2
12835 #undef RS6000_BUILTIN_3
12836 #undef RS6000_BUILTIN_A
12837 #undef RS6000_BUILTIN_D
12838 #undef RS6000_BUILTIN_H
12839 #undef RS6000_BUILTIN_P
12840 #undef RS6000_BUILTIN_X
12842 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12843 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12844 { MASK, ICODE, NAME, ENUM },
12846 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12847 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12848 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12849 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12850 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12851 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12852 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12854 static const struct builtin_description bdesc_1arg[] =
12856 #include "rs6000-builtin.def"
12859 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
12861 #undef RS6000_BUILTIN_0
12862 #undef RS6000_BUILTIN_1
12863 #undef RS6000_BUILTIN_2
12864 #undef RS6000_BUILTIN_3
12865 #undef RS6000_BUILTIN_A
12866 #undef RS6000_BUILTIN_D
12867 #undef RS6000_BUILTIN_H
12868 #undef RS6000_BUILTIN_P
12869 #undef RS6000_BUILTIN_X
12871 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12872 { MASK, ICODE, NAME, ENUM },
12874 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12875 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12876 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12877 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12878 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12879 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12880 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12881 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12883 static const struct builtin_description bdesc_0arg[] =
12885 #include "rs6000-builtin.def"
12888 /* HTM builtins. */
12889 #undef RS6000_BUILTIN_0
12890 #undef RS6000_BUILTIN_1
12891 #undef RS6000_BUILTIN_2
12892 #undef RS6000_BUILTIN_3
12893 #undef RS6000_BUILTIN_A
12894 #undef RS6000_BUILTIN_D
12895 #undef RS6000_BUILTIN_H
12896 #undef RS6000_BUILTIN_P
12897 #undef RS6000_BUILTIN_X
12899 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12900 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12901 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12902 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12903 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12904 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12905 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12906 { MASK, ICODE, NAME, ENUM },
12908 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12909 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12911 static const struct builtin_description bdesc_htm[] =
12913 #include "rs6000-builtin.def"
12916 #undef RS6000_BUILTIN_0
12917 #undef RS6000_BUILTIN_1
12918 #undef RS6000_BUILTIN_2
12919 #undef RS6000_BUILTIN_3
12920 #undef RS6000_BUILTIN_A
12921 #undef RS6000_BUILTIN_D
12922 #undef RS6000_BUILTIN_H
12923 #undef RS6000_BUILTIN_P
12925 /* Return true if a builtin function is overloaded. */
12926 bool
12927 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
12929 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
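12931 /* Return the name of the overloaded builtin function FNCODE.  */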
12932 const char *
12933 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
12935 return rs6000_builtin_info[(int)fncode].name;
12938 /* Expand an expression EXP that calls a builtin without arguments. */
12939 static rtx
12940 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
12942 rtx pat;
12943 machine_mode tmode = insn_data[icode].operand[0].mode;
12945 if (icode == CODE_FOR_nothing)
12946 /* Builtin not supported on this processor. */
12947 return 0;
12949 if (icode == CODE_FOR_rs6000_mffsl
12950 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
12952 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
12953 return const0_rtx;
12956 if (target == 0
12957 || GET_MODE (target) != tmode
12958 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12959 target = gen_reg_rtx (tmode);
12961 pat = GEN_FCN (icode) (target);
12962 if (! pat)
12963 return 0;
12964 emit_insn (pat);
12966 return target;
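12968 /* Expand the __builtin_mtfsf builtin, which copies a double into the
12969 FPSCR fields selected by a constant 8-bit mask.  */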
12970 static rtx
12971 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
12973 rtx pat;
12974 tree arg0 = CALL_EXPR_ARG (exp, 0);
12975 tree arg1 = CALL_EXPR_ARG (exp, 1);
12976 rtx op0 = expand_normal (arg0);
12977 rtx op1 = expand_normal (arg1);
12978 machine_mode mode0 = insn_data[icode].operand[0].mode;
12979 machine_mode mode1 = insn_data[icode].operand[1].mode;
12981 if (icode == CODE_FOR_nothing)
12982 /* Builtin not supported on this processor. */
12983 return 0;
12985 /* If we got invalid arguments bail out before generating bad rtl. */
12986 if (arg0 == error_mark_node || arg1 == error_mark_node)
12987 return const0_rtx;
12989 if (!CONST_INT_P (op0)
12990 || INTVAL (op0) > 255
12991 || INTVAL (op0) < 0)
12993 error ("argument 1 must be an 8-bit field value");
12994 return const0_rtx;
12997 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12998 op0 = copy_to_mode_reg (mode0, op0);
13000 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13001 op1 = copy_to_mode_reg (mode1, op1);
13003 pat = GEN_FCN (icode) (op0, op1);
13004 if (!pat)
13005 return const0_rtx;
13006 emit_insn (pat);
13008 return NULL_RTX;
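13009 /* Expand the __builtin_mtfsb0 and __builtin_mtfsb1 builtins, which clear
13010 or set a single FPSCR bit given by a constant bit number 0-31.  */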
13011 static rtx
13012 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13014 rtx pat;
13015 tree arg0 = CALL_EXPR_ARG (exp, 0);
13016 rtx op0 = expand_normal (arg0);
13018 if (icode == CODE_FOR_nothing)
13019 /* Builtin not supported on this processor. */
13020 return 0;
13022 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13024 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13025 "%<-msoft-float%>");
13026 return const0_rtx;
13029 /* If we got invalid arguments bail out before generating bad rtl. */
13030 if (arg0 == error_mark_node)
13031 return const0_rtx;
13033 /* Only allow bit numbers 0 to 31. */
13034 if (!u5bit_cint_operand (op0, VOIDmode))
13036 error ("Argument must be a constant between 0 and 31.");
13037 return const0_rtx;
13040 pat = GEN_FCN (icode) (op0);
13041 if (!pat)
13042 return const0_rtx;
13043 emit_insn (pat);
13045 return NULL_RTX;
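13046 /* Expand the __builtin_set_fpscr_rn builtin, which sets the FPSCR
13047 rounding-mode (RN) field from the low two bits of its argument.  */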
13048 static rtx
13049 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13051 rtx pat;
13052 tree arg0 = CALL_EXPR_ARG (exp, 0);
13053 rtx op0 = expand_normal (arg0);
13054 machine_mode mode0 = insn_data[icode].operand[0].mode;
13056 if (icode == CODE_FOR_nothing)
13057 /* Builtin not supported on this processor. */
13058 return 0;
13060 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13062 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13063 return const0_rtx;
13066 /* If we got invalid arguments bail out before generating bad rtl. */
13067 if (arg0 == error_mark_node)
13068 return const0_rtx;
13070 /* If the argument is a constant, check the range. Argument can only be a
13071 2-bit value. Unfortunately, can't check the range of the value at
13072 compile time if the argument is a variable. The least significant two
13073 bits of the argument, regardless of type, are used to set the rounding
13074 mode. All other bits are ignored. */
13075 if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
13077 error ("Argument must be a value between 0 and 3.");
13078 return const0_rtx;
13081 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13082 op0 = copy_to_mode_reg (mode0, op0);
13084 pat = GEN_FCN (icode) (op0);
13085 if (!pat)
13086 return const0_rtx;
13087 emit_insn (pat);
13089 return NULL_RTX;
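13090 /* Expand __builtin_set_fpscr_drn: set the FPSCR decimal rounding mode.  */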
13091 static rtx
13092 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13094 rtx pat;
13095 tree arg0 = CALL_EXPR_ARG (exp, 0);
13096 rtx op0 = expand_normal (arg0);
13097 machine_mode mode0 = insn_data[icode].operand[0].mode;
13099 if (TARGET_32BIT)
13100 /* Builtin not supported in 32-bit mode. */
13101 fatal_error (input_location,
13102 "%<__builtin_set_fpscr_drn%> is not supported "
13103 "in 32-bit mode");
13105 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13107 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13108 return const0_rtx;
13111 if (icode == CODE_FOR_nothing)
13112 /* Builtin not supported on this processor. */
13113 return 0;
13115 /* If we got invalid arguments bail out before generating bad rtl. */
13116 if (arg0 == error_mark_node)
13117 return const0_rtx;
13119 /* If the argument is a constant, check the range.  Argument can only be a
13120 3-bit value.  Unfortunately, can't check the range of the value at
13121 compile time if the argument is a variable.  The least significant three
13122 bits of the argument, regardless of type, are used to set the decimal
13123 rounding mode.  All other bits are ignored.  */
13124 if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
13126 error ("Argument must be a value between 0 and 7.");
13127 return const0_rtx;
13130 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13131 op0 = copy_to_mode_reg (mode0, op0);
13133 pat = GEN_FCN (icode) (op0);
13134 if (! pat)
13135 return const0_rtx;
13136 emit_insn (pat);
13138 return NULL_RTX;
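13139 /* Expand an expression EXP that calls a unary builtin; put the result
13140 in TARGET.  */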
13141 static rtx
13142 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13144 rtx pat;
13145 tree arg0 = CALL_EXPR_ARG (exp, 0);
13146 rtx op0 = expand_normal (arg0);
13147 machine_mode tmode = insn_data[icode].operand[0].mode;
13148 machine_mode mode0 = insn_data[icode].operand[1].mode;
13150 if (icode == CODE_FOR_nothing)
13151 /* Builtin not supported on this processor. */
13152 return 0;
13154 /* If we got invalid arguments bail out before generating bad rtl. */
13155 if (arg0 == error_mark_node)
13156 return const0_rtx;
13158 if (icode == CODE_FOR_altivec_vspltisb
13159 || icode == CODE_FOR_altivec_vspltish
13160 || icode == CODE_FOR_altivec_vspltisw)
13162 /* Only allow 5-bit *signed* literals. */
13163 if (!CONST_INT_P (op0)
13164 || INTVAL (op0) > 15
13165 || INTVAL (op0) < -16)
13167 error ("argument 1 must be a 5-bit signed literal");
13168 return CONST0_RTX (tmode);
13172 if (target == 0
13173 || GET_MODE (target) != tmode
13174 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13175 target = gen_reg_rtx (tmode);
13177 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13178 op0 = copy_to_mode_reg (mode0, op0);
13180 pat = GEN_FCN (icode) (target, op0);
13181 if (! pat)
13182 return 0;
13183 emit_insn (pat);
13185 return target;
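13186 /* Expand an AltiVec ABS* builtin, whose insn pattern needs two scratch
13187 registers in addition to the result and source operands.  */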
13188 static rtx
13189 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13191 rtx pat, scratch1, scratch2;
13192 tree arg0 = CALL_EXPR_ARG (exp, 0);
13193 rtx op0 = expand_normal (arg0);
13194 machine_mode tmode = insn_data[icode].operand[0].mode;
13195 machine_mode mode0 = insn_data[icode].operand[1].mode;
13197 /* If we have invalid arguments, bail out before generating bad rtl. */
13198 if (arg0 == error_mark_node)
13199 return const0_rtx;
13201 if (target == 0
13202 || GET_MODE (target) != tmode
13203 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13204 target = gen_reg_rtx (tmode);
13206 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13207 op0 = copy_to_mode_reg (mode0, op0);
13209 scratch1 = gen_reg_rtx (mode0);
13210 scratch2 = gen_reg_rtx (mode0);
13212 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13213 if (! pat)
13214 return 0;
13215 emit_insn (pat);
13217 return target;
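13218 /* Expand an expression EXP that calls a binary builtin; put the result
13219 in TARGET.  */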
13220 static rtx
13221 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13223 rtx pat;
13224 tree arg0 = CALL_EXPR_ARG (exp, 0);
13225 tree arg1 = CALL_EXPR_ARG (exp, 1);
13226 rtx op0 = expand_normal (arg0);
13227 rtx op1 = expand_normal (arg1);
13228 machine_mode tmode = insn_data[icode].operand[0].mode;
13229 machine_mode mode0 = insn_data[icode].operand[1].mode;
13230 machine_mode mode1 = insn_data[icode].operand[2].mode;
13232 if (icode == CODE_FOR_nothing)
13233 /* Builtin not supported on this processor. */
13234 return 0;
13236 /* If we got invalid arguments bail out before generating bad rtl. */
13237 if (arg0 == error_mark_node || arg1 == error_mark_node)
13238 return const0_rtx;
13240 if (icode == CODE_FOR_unpackv1ti
13241 || icode == CODE_FOR_unpackkf
13242 || icode == CODE_FOR_unpacktf
13243 || icode == CODE_FOR_unpackif
13244 || icode == CODE_FOR_unpacktd)
13246 /* Only allow 1-bit unsigned literals. */
13247 STRIP_NOPS (arg1);
13248 if (TREE_CODE (arg1) != INTEGER_CST
13249 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13251 error ("argument 2 must be a 1-bit unsigned literal");
13252 return CONST0_RTX (tmode);
13255 else if (icode == CODE_FOR_altivec_vspltw)
13257 /* Only allow 2-bit unsigned literals. */
13258 STRIP_NOPS (arg1);
13259 if (TREE_CODE (arg1) != INTEGER_CST
13260 || TREE_INT_CST_LOW (arg1) & ~3)
13262 error ("argument 2 must be a 2-bit unsigned literal");
13263 return CONST0_RTX (tmode);
13266 else if (icode == CODE_FOR_altivec_vsplth)
13268 /* Only allow 3-bit unsigned literals. */
13269 STRIP_NOPS (arg1);
13270 if (TREE_CODE (arg1) != INTEGER_CST
13271 || TREE_INT_CST_LOW (arg1) & ~7)
13273 error ("argument 2 must be a 3-bit unsigned literal");
13274 return CONST0_RTX (tmode);
13277 else if (icode == CODE_FOR_altivec_vspltb)
13279 /* Only allow 4-bit unsigned literals. */
13280 STRIP_NOPS (arg1);
13281 if (TREE_CODE (arg1) != INTEGER_CST
13282 || TREE_INT_CST_LOW (arg1) & ~15)
13284 error ("argument 2 must be a 4-bit unsigned literal");
13285 return CONST0_RTX (tmode);
13288 else if (icode == CODE_FOR_altivec_vcfux
13289 || icode == CODE_FOR_altivec_vcfsx
13290 || icode == CODE_FOR_altivec_vctsxs
13291 || icode == CODE_FOR_altivec_vctuxs)
13293 /* Only allow 5-bit unsigned literals. */
13294 STRIP_NOPS (arg1);
13295 if (TREE_CODE (arg1) != INTEGER_CST
13296 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13298 error ("argument 2 must be a 5-bit unsigned literal");
13299 return CONST0_RTX (tmode);
13302 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13303 || icode == CODE_FOR_dfptstsfi_lt_dd
13304 || icode == CODE_FOR_dfptstsfi_gt_dd
13305 || icode == CODE_FOR_dfptstsfi_unordered_dd
13306 || icode == CODE_FOR_dfptstsfi_eq_td
13307 || icode == CODE_FOR_dfptstsfi_lt_td
13308 || icode == CODE_FOR_dfptstsfi_gt_td
13309 || icode == CODE_FOR_dfptstsfi_unordered_td)
13311 /* Only allow 6-bit unsigned literals. */
13312 STRIP_NOPS (arg0);
13313 if (TREE_CODE (arg0) != INTEGER_CST
13314 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13316 error ("argument 1 must be a 6-bit unsigned literal");
13317 return CONST0_RTX (tmode);
13320 else if (icode == CODE_FOR_xststdcqp_kf
13321 || icode == CODE_FOR_xststdcqp_tf
13322 || icode == CODE_FOR_xststdcdp
13323 || icode == CODE_FOR_xststdcsp
13324 || icode == CODE_FOR_xvtstdcdp
13325 || icode == CODE_FOR_xvtstdcsp)
13327 /* Only allow 7-bit unsigned literals. */
13328 STRIP_NOPS (arg1);
13329 if (TREE_CODE (arg1) != INTEGER_CST
13330 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13332 error ("argument 2 must be a 7-bit unsigned literal");
13333 return CONST0_RTX (tmode);
13337 if (target == 0
13338 || GET_MODE (target) != tmode
13339 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13340 target = gen_reg_rtx (tmode);
13342 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13343 op0 = copy_to_mode_reg (mode0, op0);
13344 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13345 op1 = copy_to_mode_reg (mode1, op1);
13347 pat = GEN_FCN (icode) (target, op0, op1);
13348 if (! pat)
13349 return 0;
13350 emit_insn (pat);
13352 return target;
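13353 /* Expand an AltiVec predicate builtin: argument 1 selects which CR6 bit
13354 to test, arguments 2 and 3 are the vectors to compare.  */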
13355 static rtx
13356 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13358 rtx pat, scratch;
13359 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13360 tree arg0 = CALL_EXPR_ARG (exp, 1);
13361 tree arg1 = CALL_EXPR_ARG (exp, 2);
13362 rtx op0 = expand_normal (arg0);
13363 rtx op1 = expand_normal (arg1);
13364 machine_mode tmode = SImode;
13365 machine_mode mode0 = insn_data[icode].operand[1].mode;
13366 machine_mode mode1 = insn_data[icode].operand[2].mode;
13367 int cr6_form_int;
13369 if (TREE_CODE (cr6_form) != INTEGER_CST)
13371 error ("argument 1 of %qs must be a constant",
13372 "__builtin_altivec_predicate");
13373 return const0_rtx;
13375 else
13376 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13378 gcc_assert (mode0 == mode1);
13380 /* If we have invalid arguments, bail out before generating bad rtl. */
13381 if (arg0 == error_mark_node || arg1 == error_mark_node)
13382 return const0_rtx;
13384 if (target == 0
13385 || GET_MODE (target) != tmode
13386 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13387 target = gen_reg_rtx (tmode);
13389 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13390 op0 = copy_to_mode_reg (mode0, op0);
13391 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13392 op1 = copy_to_mode_reg (mode1, op1);
13394 /* Note that for many of the relevant operations (e.g. cmpne or
13395 cmpeq) with float or double operands, it makes more sense for the
13396 mode of the allocated scratch register to select a vector of
13397 integers.  But the choice to copy the mode of operand 0 was made
13398 long ago and there are no plans to change it. */
13399 scratch = gen_reg_rtx (mode0);
13401 pat = GEN_FCN (icode) (scratch, op0, op1);
13402 if (! pat)
13403 return 0;
13404 emit_insn (pat);
13406 /* The vec_any* and vec_all* predicates use the same opcodes for two
13407 different operations, but the bits in CR6 will be different
13408 depending on what information we want. So we have to play tricks
13409 with CR6 to get the right bits out.
13411 If you think this is disgusting, look at the specs for the
13412 AltiVec predicates. */
13414 switch (cr6_form_int)
13416 case 0:
13417 emit_insn (gen_cr6_test_for_zero (target));
13418 break;
13419 case 1:
13420 emit_insn (gen_cr6_test_for_zero_reverse (target));
13421 break;
13422 case 2:
13423 emit_insn (gen_cr6_test_for_lt (target));
13424 break;
13425 case 3:
13426 emit_insn (gen_cr6_test_for_lt_reverse (target));
13427 break;
13428 default:
13429 error ("argument 1 of %qs is out of range",
13430 "__builtin_altivec_predicate");
13431 break;
13434 return target;
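13435 /* Return a V16QImode permute selector, in a register, that reverses the
13436 bytes within each element of a vector of mode MODE.  */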
13437 rtx
13438 swap_endian_selector_for_mode (machine_mode mode)
13440 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13441 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13442 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13443 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13445 unsigned int *swaparray, i;
13446 rtx perm[16];
13448 switch (mode)
13450 case E_V1TImode:
13451 swaparray = swap1;
13452 break;
13453 case E_V2DFmode:
13454 case E_V2DImode:
13455 swaparray = swap2;
13456 break;
13457 case E_V4SFmode:
13458 case E_V4SImode:
13459 swaparray = swap4;
13460 break;
13461 case E_V8HImode:
13462 swaparray = swap8;
13463 break;
13464 default:
13465 gcc_unreachable ();
13468 for (i = 0; i < 16; ++i)
13469 perm[i] = GEN_INT (swaparray[i]);
13471 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13472 gen_rtvec_v (16, perm)));
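13473 /* Expand an AltiVec load-vector builtin (LVX and friends); if BLK is
13474 true the memory reference is created in BLKmode (lvlx/lvrx family).  */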
13475 static rtx
13476 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13478 rtx pat, addr;
13479 tree arg0 = CALL_EXPR_ARG (exp, 0);
13480 tree arg1 = CALL_EXPR_ARG (exp, 1);
13481 machine_mode tmode = insn_data[icode].operand[0].mode;
13482 machine_mode mode0 = Pmode;
13483 machine_mode mode1 = Pmode;
13484 rtx op0 = expand_normal (arg0);
13485 rtx op1 = expand_normal (arg1);
13487 if (icode == CODE_FOR_nothing)
13488 /* Builtin not supported on this processor. */
13489 return 0;
13491 /* If we got invalid arguments bail out before generating bad rtl. */
13492 if (arg0 == error_mark_node || arg1 == error_mark_node)
13493 return const0_rtx;
13495 if (target == 0
13496 || GET_MODE (target) != tmode
13497 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13498 target = gen_reg_rtx (tmode);
13500 op1 = copy_to_mode_reg (mode1, op1);
13502 /* For LVX, express the RTL accurately by ANDing the address with -16.
13503 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13504 so the raw address is fine. */
13505 if (icode == CODE_FOR_altivec_lvx_v1ti
13506 || icode == CODE_FOR_altivec_lvx_v2df
13507 || icode == CODE_FOR_altivec_lvx_v2di
13508 || icode == CODE_FOR_altivec_lvx_v4sf
13509 || icode == CODE_FOR_altivec_lvx_v4si
13510 || icode == CODE_FOR_altivec_lvx_v8hi
13511 || icode == CODE_FOR_altivec_lvx_v16qi)
13513 rtx rawaddr;
13514 if (op0 == const0_rtx)
13515 rawaddr = op1;
13516 else
13518 op0 = copy_to_mode_reg (mode0, op0);
13519 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13521 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13522 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13524 emit_insn (gen_rtx_SET (target, addr));
13526 else
13528 if (op0 == const0_rtx)
13529 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13530 else
13532 op0 = copy_to_mode_reg (mode0, op0);
13533 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13534 gen_rtx_PLUS (Pmode, op1, op0));
13537 pat = GEN_FCN (icode) (target, addr);
13538 if (! pat)
13539 return 0;
13540 emit_insn (pat);
13543 return target;
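13544 /* Expand the variable-length vector store builtins (STXVL, XST_LEN_R):
13545 void foo (vector, address, length).  */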
13546 static rtx
13547 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13549 rtx pat;
13550 tree arg0 = CALL_EXPR_ARG (exp, 0);
13551 tree arg1 = CALL_EXPR_ARG (exp, 1);
13552 tree arg2 = CALL_EXPR_ARG (exp, 2);
13553 rtx op0 = expand_normal (arg0);
13554 rtx op1 = expand_normal (arg1);
13555 rtx op2 = expand_normal (arg2);
13556 machine_mode mode0 = insn_data[icode].operand[0].mode;
13557 machine_mode mode1 = insn_data[icode].operand[1].mode;
13558 machine_mode mode2 = insn_data[icode].operand[2].mode;
13560 if (icode == CODE_FOR_nothing)
13561 /* Builtin not supported on this processor. */
13562 return NULL_RTX;
13564 /* If we got invalid arguments bail out before generating bad rtl. */
13565 if (arg0 == error_mark_node
13566 || arg1 == error_mark_node
13567 || arg2 == error_mark_node)
13568 return NULL_RTX;
13570 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13571 op0 = copy_to_mode_reg (mode0, op0);
13572 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13573 op1 = copy_to_mode_reg (mode1, op1);
13574 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13575 op2 = copy_to_mode_reg (mode2, op2);
13577 pat = GEN_FCN (icode) (op0, op1, op2);
13578 if (pat)
13579 emit_insn (pat);
13581 return NULL_RTX;
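13582 /* Expand an AltiVec store-vector builtin (STVX and friends):
13583 void foo (value, offset, base address).  */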
13584 static rtx
13585 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13587 tree arg0 = CALL_EXPR_ARG (exp, 0);
13588 tree arg1 = CALL_EXPR_ARG (exp, 1);
13589 tree arg2 = CALL_EXPR_ARG (exp, 2);
13590 rtx op0 = expand_normal (arg0);
13591 rtx op1 = expand_normal (arg1);
13592 rtx op2 = expand_normal (arg2);
13593 rtx pat, addr, rawaddr;
13594 machine_mode tmode = insn_data[icode].operand[0].mode;
13595 machine_mode smode = insn_data[icode].operand[1].mode;
13596 machine_mode mode1 = Pmode;
13597 machine_mode mode2 = Pmode;
13599 /* If we got invalid arguments, bail out before generating bad rtl.  */
13600 if (arg0 == error_mark_node
13601 || arg1 == error_mark_node
13602 || arg2 == error_mark_node)
13603 return const0_rtx;
13605 op2 = copy_to_mode_reg (mode2, op2);
13607 /* For STVX, express the RTL accurately by ANDing the address with -16.
13608 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13609 so the raw address is fine. */
13610 if (icode == CODE_FOR_altivec_stvx_v2df
13611 || icode == CODE_FOR_altivec_stvx_v2di
13612 || icode == CODE_FOR_altivec_stvx_v4sf
13613 || icode == CODE_FOR_altivec_stvx_v4si
13614 || icode == CODE_FOR_altivec_stvx_v8hi
13615 || icode == CODE_FOR_altivec_stvx_v16qi)
13617 if (op1 == const0_rtx)
13618 rawaddr = op2;
13619 else
13621 op1 = copy_to_mode_reg (mode1, op1);
13622 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13625 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13626 addr = gen_rtx_MEM (tmode, addr);
13628 op0 = copy_to_mode_reg (tmode, op0);
13630 emit_insn (gen_rtx_SET (addr, op0));
13632 else
13634 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13635 op0 = copy_to_mode_reg (smode, op0);
13637 if (op1 == const0_rtx)
13638 addr = gen_rtx_MEM (tmode, op2);
13639 else
13641 op1 = copy_to_mode_reg (mode1, op1);
13642 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13645 pat = GEN_FCN (icode) (addr, op0);
13646 if (pat)
13647 emit_insn (pat);
13650 return NULL_RTX;
13653 /* Return the appropriate SPR number associated with the given builtin. */
13654 static inline HOST_WIDE_INT
13655 htm_spr_num (enum rs6000_builtins code)
13657 if (code == HTM_BUILTIN_GET_TFHAR
13658 || code == HTM_BUILTIN_SET_TFHAR)
13659 return TFHAR_SPR;
13660 else if (code == HTM_BUILTIN_GET_TFIAR
13661 || code == HTM_BUILTIN_SET_TFIAR)
13662 return TFIAR_SPR;
13663 else if (code == HTM_BUILTIN_GET_TEXASR
13664 || code == HTM_BUILTIN_SET_TEXASR)
13665 return TEXASR_SPR;
13666 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13667 || code == HTM_BUILTIN_SET_TEXASRU);
13668 return TEXASRU_SPR;
13671 /* Return the correct ICODE value depending on whether we are
13672 setting or reading the HTM SPRs. */
13673 static inline enum insn_code
13674 rs6000_htm_spr_icode (bool nonvoid)
13676 if (nonvoid)
13677 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13678 else
13679 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13682 /* Expand the HTM builtin in EXP and store the result in TARGET.
13683 Store true in *EXPANDEDP if we found a builtin to expand. */
13684 static rtx
13685 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13687 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13688 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13689 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13690 const struct builtin_description *d;
13691 size_t i;
13693 *expandedp = true;
13695 if (!TARGET_POWERPC64
13696 && (fcode == HTM_BUILTIN_TABORTDC
13697 || fcode == HTM_BUILTIN_TABORTDCI))
13699 size_t uns_fcode = (size_t)fcode;
13700 const char *name = rs6000_builtin_info[uns_fcode].name;
13701 error ("builtin %qs is only valid in 64-bit mode", name);
13702 return const0_rtx;
13705 /* Expand the HTM builtins. */
13706 d = bdesc_htm;
13707 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13708 if (d->code == fcode)
13710 rtx op[MAX_HTM_OPERANDS], pat;
13711 int nopnds = 0;
13712 tree arg;
13713 call_expr_arg_iterator iter;
13714 unsigned attr = rs6000_builtin_info[fcode].attr;
13715 enum insn_code icode = d->icode;
13716 const struct insn_operand_data *insn_op;
13717 bool uses_spr = (attr & RS6000_BTC_SPR);
13718 rtx cr = NULL_RTX;
13720 if (uses_spr)
13721 icode = rs6000_htm_spr_icode (nonvoid);
13722 insn_op = &insn_data[icode].operand[0];
13724 if (nonvoid)
13726 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13727 if (!target
13728 || GET_MODE (target) != tmode
13729 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13730 target = gen_reg_rtx (tmode);
13731 if (uses_spr)
13732 op[nopnds++] = target;
13735 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13737 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13738 return const0_rtx;
13740 insn_op = &insn_data[icode].operand[nopnds];
13742 op[nopnds] = expand_normal (arg);
13744 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13746 if (!strcmp (insn_op->constraint, "n"))
13748 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13749 if (!CONST_INT_P (op[nopnds]))
13750 error ("argument %d must be an unsigned literal", arg_num);
13751 else
13752 error ("argument %d is an unsigned literal that is "
13753 "out of range", arg_num);
13754 return const0_rtx;
13756 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13759 nopnds++;
13762 /* Handle the builtins for extended mnemonics. These accept
13763 no arguments, but map to builtins that take arguments. */
13764 switch (fcode)
13766 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13767 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13768 op[nopnds++] = GEN_INT (1);
13769 if (flag_checking)
13770 attr |= RS6000_BTC_UNARY;
13771 break;
13772 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13773 op[nopnds++] = GEN_INT (0);
13774 if (flag_checking)
13775 attr |= RS6000_BTC_UNARY;
13776 break;
13777 default:
13778 break;
13781 /* If this builtin accesses SPRs, then pass in the appropriate
13782 SPR number and SPR regno as the last two operands. */
13783 if (uses_spr)
13785 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13786 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13788 /* If this builtin accesses a CR, then pass in a scratch
13789 CR as the last operand. */
13790 else if (attr & RS6000_BTC_CR)
13791 cr = gen_reg_rtx (CCmode);
13792 op[nopnds++] = cr;
13795 if (flag_checking)
13797 int expected_nopnds = 0;
13798 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13799 expected_nopnds = 1;
13800 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13801 expected_nopnds = 2;
13802 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13803 expected_nopnds = 3;
13804 if (!(attr & RS6000_BTC_VOID))
13805 expected_nopnds += 1;
13806 if (uses_spr)
13807 expected_nopnds += 1;
13809 gcc_assert (nopnds == expected_nopnds
13810 && nopnds <= MAX_HTM_OPERANDS);
13813 switch (nopnds)
13815 case 1:
13816 pat = GEN_FCN (icode) (op[0]);
13817 break;
13818 case 2:
13819 pat = GEN_FCN (icode) (op[0], op[1]);
13820 break;
13821 case 3:
13822 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13823 break;
13824 case 4:
13825 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13826 break;
13827 default:
13828 gcc_unreachable ();
13830 if (!pat)
13831 return NULL_RTX;
13832 emit_insn (pat);
13834 if (attr & RS6000_BTC_CR)
13836 if (fcode == HTM_BUILTIN_TBEGIN)
13838 /* Emit code to set TARGET to true or false depending on
13839 whether the tbegin. instruction succeeded or failed
13840 to start a transaction. We do this by placing the 1's
13841 complement of CR's EQ bit into TARGET. */
13842 rtx scratch = gen_reg_rtx (SImode);
13843 emit_insn (gen_rtx_SET (scratch,
13844 gen_rtx_EQ (SImode, cr,
13845 const0_rtx)));
13846 emit_insn (gen_rtx_SET (target,
13847 gen_rtx_XOR (SImode, scratch,
13848 GEN_INT (1))));
13850 else
13852 /* Emit code to copy the 4-bit condition register field
13853 CR into the least significant end of register TARGET. */
13854 rtx scratch1 = gen_reg_rtx (SImode);
13855 rtx scratch2 = gen_reg_rtx (SImode);
13856 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13857 emit_insn (gen_movcc (subreg, cr));
13858 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13859 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13863 if (nonvoid)
13864 return target;
13865 return const0_rtx;
13868 *expandedp = false;
13869 return NULL_RTX;
13872 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
13874 static rtx
13875 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13876 rtx target)
13878 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13879 if (fcode == RS6000_BUILTIN_CPU_INIT)
13880 return const0_rtx;
13882 if (target == 0 || GET_MODE (target) != SImode)
13883 target = gen_reg_rtx (SImode);
13885 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13886 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13887 /* Target clones create an ARRAY_REF instead of a STRING_CST; convert it
13888 back to a STRING_CST.  */
13889 if (TREE_CODE (arg) == ARRAY_REF
13890 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13891 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13892 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13893 arg = TREE_OPERAND (arg, 0);
13895 if (TREE_CODE (arg) != STRING_CST)
13897 error ("builtin %qs only accepts a string argument",
13898 rs6000_builtin_info[(size_t) fcode].name);
13899 return const0_rtx;
13902 if (fcode == RS6000_BUILTIN_CPU_IS)
13904 const char *cpu = TREE_STRING_POINTER (arg);
13905 rtx cpuid = NULL_RTX;
13906 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
13907 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
13909 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
13910 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
13911 break;
13913 if (cpuid == NULL_RTX)
13915 /* Invalid CPU argument. */
13916 error ("cpu %qs is an invalid argument to builtin %qs",
13917 cpu, rs6000_builtin_info[(size_t) fcode].name);
13918 return const0_rtx;
13921 rtx platform = gen_reg_rtx (SImode);
13922 rtx tcbmem = gen_const_mem (SImode,
13923 gen_rtx_PLUS (Pmode,
13924 gen_rtx_REG (Pmode, TLS_REGNUM),
13925 GEN_INT (TCB_PLATFORM_OFFSET)));
13926 emit_move_insn (platform, tcbmem);
13927 emit_insn (gen_eqsi3 (target, platform, cpuid));
13929 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
13931 const char *hwcap = TREE_STRING_POINTER (arg);
13932 rtx mask = NULL_RTX;
13933 int hwcap_offset;
13934 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
13935 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
13937 mask = GEN_INT (cpu_supports_info[i].mask);
13938 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
13939 break;
13941 if (mask == NULL_RTX)
13943 /* Invalid HWCAP argument. */
13944 error ("%s %qs is an invalid argument to builtin %qs",
13945 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
13946 return const0_rtx;
13949 rtx tcb_hwcap = gen_reg_rtx (SImode);
13950 rtx tcbmem = gen_const_mem (SImode,
13951 gen_rtx_PLUS (Pmode,
13952 gen_rtx_REG (Pmode, TLS_REGNUM),
13953 GEN_INT (hwcap_offset)));
13954 emit_move_insn (tcb_hwcap, tcbmem);
13955 rtx scratch1 = gen_reg_rtx (SImode);
13956 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
13957 rtx scratch2 = gen_reg_rtx (SImode);
13958 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
13959 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
13961 else
13962 gcc_unreachable ();
13964 /* Record that we have expanded a CPU builtin, so that we can later
13965 emit a reference to the special symbol exported by LIBC to ensure we
13966 do not link against an old LIBC that doesn't support this feature. */
13967 cpu_builtin_p = true;
13969 #else
13970 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
13971 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
13973 /* For old LIBCs, always return FALSE. */
13974 emit_move_insn (target, GEN_INT (0));
13975 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
13977 return target;
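13978 /* Expand an expression EXP that calls a ternary builtin; put the result
13979 in TARGET.  */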
13980 static rtx
13981 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
13983 rtx pat;
13984 tree arg0 = CALL_EXPR_ARG (exp, 0);
13985 tree arg1 = CALL_EXPR_ARG (exp, 1);
13986 tree arg2 = CALL_EXPR_ARG (exp, 2);
13987 rtx op0 = expand_normal (arg0);
13988 rtx op1 = expand_normal (arg1);
13989 rtx op2 = expand_normal (arg2);
13990 machine_mode tmode = insn_data[icode].operand[0].mode;
13991 machine_mode mode0 = insn_data[icode].operand[1].mode;
13992 machine_mode mode1 = insn_data[icode].operand[2].mode;
13993 machine_mode mode2 = insn_data[icode].operand[3].mode;
13995 if (icode == CODE_FOR_nothing)
13996 /* Builtin not supported on this processor. */
13997 return 0;
13999 /* If we got invalid arguments bail out before generating bad rtl. */
14000 if (arg0 == error_mark_node
14001 || arg1 == error_mark_node
14002 || arg2 == error_mark_node)
14003 return const0_rtx;
14005 /* Check and prepare argument depending on the instruction code.
14007 Note that a switch statement instead of the sequence of tests
14008 would be incorrect as many of the CODE_FOR values could be
14009 CODE_FOR_nothing and that would yield multiple alternatives
14010 with identical values. We'd never reach here at runtime in
14011 this case. */
14012 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14013 || icode == CODE_FOR_altivec_vsldoi_v2df
14014 || icode == CODE_FOR_altivec_vsldoi_v4si
14015 || icode == CODE_FOR_altivec_vsldoi_v8hi
14016 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14018 /* Only allow 4-bit unsigned literals. */
14019 STRIP_NOPS (arg2);
14020 if (TREE_CODE (arg2) != INTEGER_CST
14021 || TREE_INT_CST_LOW (arg2) & ~0xf)
14023 error ("argument 3 must be a 4-bit unsigned literal");
14024 return CONST0_RTX (tmode);
14027 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14028 || icode == CODE_FOR_vsx_xxpermdi_v2di
14029 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14030 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14031 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14032 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14033 || icode == CODE_FOR_vsx_xxpermdi_v4si
14034 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14035 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14036 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14037 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14038 || icode == CODE_FOR_vsx_xxsldwi_v4si
14039 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14040 || icode == CODE_FOR_vsx_xxsldwi_v2di
14041 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14043 /* Only allow 2-bit unsigned literals. */
14044 STRIP_NOPS (arg2);
14045 if (TREE_CODE (arg2) != INTEGER_CST
14046 || TREE_INT_CST_LOW (arg2) & ~0x3)
14048 error ("argument 3 must be a 2-bit unsigned literal");
14049 return CONST0_RTX (tmode);
14052 else if (icode == CODE_FOR_vsx_set_v2df
14053 || icode == CODE_FOR_vsx_set_v2di
14054 || icode == CODE_FOR_bcdadd
14055 || icode == CODE_FOR_bcdadd_lt
14056 || icode == CODE_FOR_bcdadd_eq
14057 || icode == CODE_FOR_bcdadd_gt
14058 || icode == CODE_FOR_bcdsub
14059 || icode == CODE_FOR_bcdsub_lt
14060 || icode == CODE_FOR_bcdsub_eq
14061 || icode == CODE_FOR_bcdsub_gt)
14063 /* Only allow 1-bit unsigned literals. */
14064 STRIP_NOPS (arg2);
14065 if (TREE_CODE (arg2) != INTEGER_CST
14066 || TREE_INT_CST_LOW (arg2) & ~0x1)
14068 error ("argument 3 must be a 1-bit unsigned literal");
14069 return CONST0_RTX (tmode);
14072 else if (icode == CODE_FOR_dfp_ddedpd_dd
14073 || icode == CODE_FOR_dfp_ddedpd_td)
14075 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14076 STRIP_NOPS (arg0);
14077 if (TREE_CODE (arg0) != INTEGER_CST
14078 || TREE_INT_CST_LOW (arg2) & ~0x3)
14080 error ("argument 1 must be 0 or 2");
14081 return CONST0_RTX (tmode);
14084 else if (icode == CODE_FOR_dfp_denbcd_dd
14085 || icode == CODE_FOR_dfp_denbcd_td)
14087 /* Only allow 1-bit unsigned literals. */
14088 STRIP_NOPS (arg0);
14089 if (TREE_CODE (arg0) != INTEGER_CST
14090 || TREE_INT_CST_LOW (arg0) & ~0x1)
14092 error ("argument 1 must be a 1-bit unsigned literal");
14093 return CONST0_RTX (tmode);
14096 else if (icode == CODE_FOR_dfp_dscli_dd
14097 || icode == CODE_FOR_dfp_dscli_td
14098 || icode == CODE_FOR_dfp_dscri_dd
14099 || icode == CODE_FOR_dfp_dscri_td)
14101 /* Only allow 6-bit unsigned literals. */
14102 STRIP_NOPS (arg1);
14103 if (TREE_CODE (arg1) != INTEGER_CST
14104 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14106 error ("argument 2 must be a 6-bit unsigned literal");
14107 return CONST0_RTX (tmode);
14110 else if (icode == CODE_FOR_crypto_vshasigmaw
14111 || icode == CODE_FOR_crypto_vshasigmad)
14113 /* Check whether the 2nd and 3rd arguments are integer constants and in
14114 range and prepare arguments. */
14115 STRIP_NOPS (arg1);
14116 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14118 error ("argument 2 must be 0 or 1");
14119 return CONST0_RTX (tmode);
14122 STRIP_NOPS (arg2);
14123 if (TREE_CODE (arg2) != INTEGER_CST
14124 || wi::geu_p (wi::to_wide (arg2), 16))
14126 error ("argument 3 must be in the range [0, 15]");
14127 return CONST0_RTX (tmode);
14131 if (target == 0
14132 || GET_MODE (target) != tmode
14133 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14134 target = gen_reg_rtx (tmode);
14136 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14137 op0 = copy_to_mode_reg (mode0, op0);
14138 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14139 op1 = copy_to_mode_reg (mode1, op1);
14140 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14141 op2 = copy_to_mode_reg (mode2, op2);
14143 pat = GEN_FCN (icode) (target, op0, op1, op2);
14144 if (! pat)
14145 return 0;
14146 emit_insn (pat);
14148 return target;
14152 /* Expand the dst builtins. */
14153 static rtx
14154 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14155 bool *expandedp)
14157 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14158 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14159 tree arg0, arg1, arg2;
14160 machine_mode mode0, mode1;
14161 rtx pat, op0, op1, op2;
14162 const struct builtin_description *d;
14163 size_t i;
14165 *expandedp = false;
14167 /* Handle DST variants. */
14168 d = bdesc_dst;
14169 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14170 if (d->code == fcode)
14172 arg0 = CALL_EXPR_ARG (exp, 0);
14173 arg1 = CALL_EXPR_ARG (exp, 1);
14174 arg2 = CALL_EXPR_ARG (exp, 2);
14175 op0 = expand_normal (arg0);
14176 op1 = expand_normal (arg1);
14177 op2 = expand_normal (arg2);
14178 mode0 = insn_data[d->icode].operand[0].mode;
14179 mode1 = insn_data[d->icode].operand[1].mode;
14181 /* Invalid arguments, bail out before generating bad rtl. */
14182 if (arg0 == error_mark_node
14183 || arg1 == error_mark_node
14184 || arg2 == error_mark_node)
14185 return const0_rtx;
14187 *expandedp = true;
14188 STRIP_NOPS (arg2);
14189 if (TREE_CODE (arg2) != INTEGER_CST
14190 || TREE_INT_CST_LOW (arg2) & ~0x3)
14192 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14193 return const0_rtx;
14196 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14197 op0 = copy_to_mode_reg (Pmode, op0);
14198 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14199 op1 = copy_to_mode_reg (mode1, op1);
14201 pat = GEN_FCN (d->icode) (op0, op1, op2);
14202 if (pat != 0)
14203 emit_insn (pat);
14205 return NULL_RTX;
14208 return NULL_RTX;
14211 /* Expand vec_init builtin. */
14212 static rtx
14213 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14215 machine_mode tmode = TYPE_MODE (type);
14216 machine_mode inner_mode = GET_MODE_INNER (tmode);
14217 int i, n_elt = GET_MODE_NUNITS (tmode);
14219 gcc_assert (VECTOR_MODE_P (tmode));
14220 gcc_assert (n_elt == call_expr_nargs (exp));
14222 if (!target || !register_operand (target, tmode))
14223 target = gen_reg_rtx (tmode);
14225 /* If we have a vector composed of a single element, such as V1TImode, do
14226 the initialization directly. */
14227 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14229 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14230 emit_move_insn (target, gen_lowpart (tmode, x));
14232 else
14234 rtvec v = rtvec_alloc (n_elt);
14236 for (i = 0; i < n_elt; ++i)
14238 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14239 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14242 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14245 return target;
14248 /* Return the integer constant in ARG. Constrain it to be in the range
14249 of the subparts of VEC_TYPE; issue an error if not. */
14251 static int
14252 get_element_number (tree vec_type, tree arg)
14254 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14256 if (!tree_fits_uhwi_p (arg)
14257 || (elt = tree_to_uhwi (arg), elt > max))
14259 error ("selector must be an integer constant in the range [0, %wi]", max);
14260 return 0;
14263 return elt;
14266 /* Expand vec_set builtin. */
14267 static rtx
14268 altivec_expand_vec_set_builtin (tree exp)
14270 machine_mode tmode, mode1;
14271 tree arg0, arg1, arg2;
14272 int elt;
14273 rtx op0, op1;
14275 arg0 = CALL_EXPR_ARG (exp, 0);
14276 arg1 = CALL_EXPR_ARG (exp, 1);
14277 arg2 = CALL_EXPR_ARG (exp, 2);
14279 tmode = TYPE_MODE (TREE_TYPE (arg0));
14280 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14281 gcc_assert (VECTOR_MODE_P (tmode));
14283 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14284 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14285 elt = get_element_number (TREE_TYPE (arg0), arg2);
14287 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14288 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14290 op0 = force_reg (tmode, op0);
14291 op1 = force_reg (mode1, op1);
14293 rs6000_expand_vector_set (op0, op1, elt);
14295 return op0;
14298 /* Expand vec_ext builtin. */
14299 static rtx
14300 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14302 machine_mode tmode, mode0;
14303 tree arg0, arg1;
14304 rtx op0;
14305 rtx op1;
14307 arg0 = CALL_EXPR_ARG (exp, 0);
14308 arg1 = CALL_EXPR_ARG (exp, 1);
14310 op0 = expand_normal (arg0);
14311 op1 = expand_normal (arg1);
14313 if (TREE_CODE (arg1) == INTEGER_CST)
14315 unsigned HOST_WIDE_INT elt;
14316 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14317 unsigned int truncated_selector;
14318 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1) returns
14319 the low-order bits of the INTEGER_CST for modulo indexing.  */
14320 elt = TREE_INT_CST_LOW (arg1);
14321 truncated_selector = elt % size;
14322 op1 = GEN_INT (truncated_selector);
14325 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14326 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14327 gcc_assert (VECTOR_MODE_P (mode0));
14329 op0 = force_reg (mode0, op0);
14331 if (optimize || !target || !register_operand (target, tmode))
14332 target = gen_reg_rtx (tmode);
14334 rs6000_expand_vector_extract (target, op0, op1);
14336 return target;
14339 /* Expand the builtin in EXP and store the result in TARGET. Store
14340 true in *EXPANDEDP if we found a builtin to expand. */
14341 static rtx
14342 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14344 const struct builtin_description *d;
14345 size_t i;
14346 enum insn_code icode;
14347 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14348 tree arg0, arg1, arg2;
14349 rtx op0, pat;
14350 machine_mode tmode, mode0;
14351 enum rs6000_builtins fcode
14352 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14354 if (rs6000_overloaded_builtin_p (fcode))
14356 *expandedp = true;
14357 error ("unresolved overload for Altivec builtin %qF", fndecl);
14359 /* Given it is invalid, just generate a normal call. */
14360 return expand_call (exp, target, false);
14363 target = altivec_expand_dst_builtin (exp, target, expandedp);
14364 if (*expandedp)
14365 return target;
14367 *expandedp = true;
14369 switch (fcode)
14371 case ALTIVEC_BUILTIN_STVX_V2DF:
14372 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14373 case ALTIVEC_BUILTIN_STVX_V2DI:
14374 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14375 case ALTIVEC_BUILTIN_STVX_V4SF:
14376 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14377 case ALTIVEC_BUILTIN_STVX:
14378 case ALTIVEC_BUILTIN_STVX_V4SI:
14379 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14380 case ALTIVEC_BUILTIN_STVX_V8HI:
14381 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14382 case ALTIVEC_BUILTIN_STVX_V16QI:
14383 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14384 case ALTIVEC_BUILTIN_STVEBX:
14385 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14386 case ALTIVEC_BUILTIN_STVEHX:
14387 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14388 case ALTIVEC_BUILTIN_STVEWX:
14389 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14390 case ALTIVEC_BUILTIN_STVXL_V2DF:
14391 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14392 case ALTIVEC_BUILTIN_STVXL_V2DI:
14393 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14394 case ALTIVEC_BUILTIN_STVXL_V4SF:
14395 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14396 case ALTIVEC_BUILTIN_STVXL:
14397 case ALTIVEC_BUILTIN_STVXL_V4SI:
14398 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14399 case ALTIVEC_BUILTIN_STVXL_V8HI:
14400 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14401 case ALTIVEC_BUILTIN_STVXL_V16QI:
14402 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14404 case ALTIVEC_BUILTIN_STVLX:
14405 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14406 case ALTIVEC_BUILTIN_STVLXL:
14407 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14408 case ALTIVEC_BUILTIN_STVRX:
14409 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14410 case ALTIVEC_BUILTIN_STVRXL:
14411 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14413 case P9V_BUILTIN_STXVL:
14414 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14416 case P9V_BUILTIN_XST_LEN_R:
14417 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14419 case VSX_BUILTIN_STXVD2X_V1TI:
14420 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14421 case VSX_BUILTIN_STXVD2X_V2DF:
14422 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14423 case VSX_BUILTIN_STXVD2X_V2DI:
14424 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14425 case VSX_BUILTIN_STXVW4X_V4SF:
14426 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14427 case VSX_BUILTIN_STXVW4X_V4SI:
14428 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14429 case VSX_BUILTIN_STXVW4X_V8HI:
14430 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14431 case VSX_BUILTIN_STXVW4X_V16QI:
14432 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14434 /* For the following on big endian, it's ok to use any appropriate
14435 unaligned-supporting store, so use a generic expander. For
14436 little-endian, the exact element-reversing instruction must
14437 be used. */
14438 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14440 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14441 : CODE_FOR_vsx_st_elemrev_v1ti);
14442 return altivec_expand_stv_builtin (code, exp);
14444 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14446 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14447 : CODE_FOR_vsx_st_elemrev_v2df);
14448 return altivec_expand_stv_builtin (code, exp);
14450 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14452 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14453 : CODE_FOR_vsx_st_elemrev_v2di);
14454 return altivec_expand_stv_builtin (code, exp);
14456 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14458 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14459 : CODE_FOR_vsx_st_elemrev_v4sf);
14460 return altivec_expand_stv_builtin (code, exp);
14462 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14464 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14465 : CODE_FOR_vsx_st_elemrev_v4si);
14466 return altivec_expand_stv_builtin (code, exp);
14468 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14470 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14471 : CODE_FOR_vsx_st_elemrev_v8hi);
14472 return altivec_expand_stv_builtin (code, exp);
14474 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14476 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14477 : CODE_FOR_vsx_st_elemrev_v16qi);
14478 return altivec_expand_stv_builtin (code, exp);
14481 case ALTIVEC_BUILTIN_MFVSCR:
14482 icode = CODE_FOR_altivec_mfvscr;
14483 tmode = insn_data[icode].operand[0].mode;
14485 if (target == 0
14486 || GET_MODE (target) != tmode
14487 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14488 target = gen_reg_rtx (tmode);
14490 pat = GEN_FCN (icode) (target);
14491 if (! pat)
14492 return 0;
14493 emit_insn (pat);
14494 return target;
14496 case ALTIVEC_BUILTIN_MTVSCR:
14497 icode = CODE_FOR_altivec_mtvscr;
14498 arg0 = CALL_EXPR_ARG (exp, 0);
14499 op0 = expand_normal (arg0);
14500 mode0 = insn_data[icode].operand[0].mode;
14502 /* If we got invalid arguments bail out before generating bad rtl. */
14503 if (arg0 == error_mark_node)
14504 return const0_rtx;
14506 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14507 op0 = copy_to_mode_reg (mode0, op0);
14509 pat = GEN_FCN (icode) (op0);
14510 if (pat)
14511 emit_insn (pat);
14512 return NULL_RTX;
14514 case ALTIVEC_BUILTIN_DSSALL:
14515 emit_insn (gen_altivec_dssall ());
14516 return NULL_RTX;
14518 case ALTIVEC_BUILTIN_DSS:
14519 icode = CODE_FOR_altivec_dss;
14520 arg0 = CALL_EXPR_ARG (exp, 0);
14521 STRIP_NOPS (arg0);
14522 op0 = expand_normal (arg0);
14523 mode0 = insn_data[icode].operand[0].mode;
14525 /* If we got invalid arguments bail out before generating bad rtl. */
14526 if (arg0 == error_mark_node)
14527 return const0_rtx;
14529 if (TREE_CODE (arg0) != INTEGER_CST
14530 || TREE_INT_CST_LOW (arg0) & ~0x3)
14532 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14533 return const0_rtx;
14536 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14537 op0 = copy_to_mode_reg (mode0, op0);
14539 emit_insn (gen_altivec_dss (op0));
14540 return NULL_RTX;
14542 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14543 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14544 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14545 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14546 case VSX_BUILTIN_VEC_INIT_V2DF:
14547 case VSX_BUILTIN_VEC_INIT_V2DI:
14548 case VSX_BUILTIN_VEC_INIT_V1TI:
14549 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14551 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14552 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14553 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14554 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14555 case VSX_BUILTIN_VEC_SET_V2DF:
14556 case VSX_BUILTIN_VEC_SET_V2DI:
14557 case VSX_BUILTIN_VEC_SET_V1TI:
14558 return altivec_expand_vec_set_builtin (exp);
14560 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14561 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14562 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14563 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14564 case VSX_BUILTIN_VEC_EXT_V2DF:
14565 case VSX_BUILTIN_VEC_EXT_V2DI:
14566 case VSX_BUILTIN_VEC_EXT_V1TI:
14567 return altivec_expand_vec_ext_builtin (exp, target);
14569 case P9V_BUILTIN_VEC_EXTRACT4B:
14570 arg1 = CALL_EXPR_ARG (exp, 1);
14571 STRIP_NOPS (arg1);
14573 /* Generate a normal call if it is invalid. */
14574 if (arg1 == error_mark_node)
14575 return expand_call (exp, target, false);
14577 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14579 error ("second argument to %qs must be [0, 12]", "vec_vextract4b");
14580 return expand_call (exp, target, false);
14582 break;
14584 case P9V_BUILTIN_VEC_INSERT4B:
14585 arg2 = CALL_EXPR_ARG (exp, 2);
14586 STRIP_NOPS (arg2);
14588 /* Generate a normal call if it is invalid. */
14589 if (arg2 == error_mark_node)
14590 return expand_call (exp, target, false);
14592 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14594 error ("third argument to %qs must be [0, 12]", "vec_vinsert4b");
14595 return expand_call (exp, target, false);
14597 break;
14599 default:
14600 break;
14601 /* Fall through. */
14604 /* Expand abs* operations. */
14605 d = bdesc_abs;
14606 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14607 if (d->code == fcode)
14608 return altivec_expand_abs_builtin (d->icode, exp, target);
14610 /* Expand the AltiVec predicates. */
14611 d = bdesc_altivec_preds;
14612 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14613 if (d->code == fcode)
14614 return altivec_expand_predicate_builtin (d->icode, exp, target);
14616 /* LV* are funky. We initialized them differently. */
14617 switch (fcode)
14619 case ALTIVEC_BUILTIN_LVSL:
14620 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14621 exp, target, false);
14622 case ALTIVEC_BUILTIN_LVSR:
14623 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14624 exp, target, false);
14625 case ALTIVEC_BUILTIN_LVEBX:
14626 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14627 exp, target, false);
14628 case ALTIVEC_BUILTIN_LVEHX:
14629 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14630 exp, target, false);
14631 case ALTIVEC_BUILTIN_LVEWX:
14632 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14633 exp, target, false);
14634 case ALTIVEC_BUILTIN_LVXL_V2DF:
14635 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14636 exp, target, false);
14637 case ALTIVEC_BUILTIN_LVXL_V2DI:
14638 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14639 exp, target, false);
14640 case ALTIVEC_BUILTIN_LVXL_V4SF:
14641 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14642 exp, target, false);
14643 case ALTIVEC_BUILTIN_LVXL:
14644 case ALTIVEC_BUILTIN_LVXL_V4SI:
14645 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14646 exp, target, false);
14647 case ALTIVEC_BUILTIN_LVXL_V8HI:
14648 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14649 exp, target, false);
14650 case ALTIVEC_BUILTIN_LVXL_V16QI:
14651 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14652 exp, target, false);
14653 case ALTIVEC_BUILTIN_LVX_V1TI:
14654 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14655 exp, target, false);
14656 case ALTIVEC_BUILTIN_LVX_V2DF:
14657 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14658 exp, target, false);
14659 case ALTIVEC_BUILTIN_LVX_V2DI:
14660 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14661 exp, target, false);
14662 case ALTIVEC_BUILTIN_LVX_V4SF:
14663 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14664 exp, target, false);
14665 case ALTIVEC_BUILTIN_LVX:
14666 case ALTIVEC_BUILTIN_LVX_V4SI:
14667 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14668 exp, target, false);
14669 case ALTIVEC_BUILTIN_LVX_V8HI:
14670 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14671 exp, target, false);
14672 case ALTIVEC_BUILTIN_LVX_V16QI:
14673 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14674 exp, target, false);
14675 case ALTIVEC_BUILTIN_LVLX:
14676 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14677 exp, target, true);
14678 case ALTIVEC_BUILTIN_LVLXL:
14679 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14680 exp, target, true);
14681 case ALTIVEC_BUILTIN_LVRX:
14682 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14683 exp, target, true);
14684 case ALTIVEC_BUILTIN_LVRXL:
14685 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14686 exp, target, true);
14687 case VSX_BUILTIN_LXVD2X_V1TI:
14688 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14689 exp, target, false);
14690 case VSX_BUILTIN_LXVD2X_V2DF:
14691 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14692 exp, target, false);
14693 case VSX_BUILTIN_LXVD2X_V2DI:
14694 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14695 exp, target, false);
14696 case VSX_BUILTIN_LXVW4X_V4SF:
14697 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14698 exp, target, false);
14699 case VSX_BUILTIN_LXVW4X_V4SI:
14700 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14701 exp, target, false);
14702 case VSX_BUILTIN_LXVW4X_V8HI:
14703 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14704 exp, target, false);
14705 case VSX_BUILTIN_LXVW4X_V16QI:
14706 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14707 exp, target, false);
14708 /* For the following on big-endian, it's OK to use any appropriate
14709 unaligned-supporting load, so use a generic expander. For
14710 little-endian, the exact element-reversing instruction must
14711 be used. */
14712 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14714 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14715 : CODE_FOR_vsx_ld_elemrev_v2df);
14716 return altivec_expand_lv_builtin (code, exp, target, false);
14718 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14720 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14721 : CODE_FOR_vsx_ld_elemrev_v1ti);
14722 return altivec_expand_lv_builtin (code, exp, target, false);
14724 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14726 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14727 : CODE_FOR_vsx_ld_elemrev_v2di);
14728 return altivec_expand_lv_builtin (code, exp, target, false);
14730 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14732 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14733 : CODE_FOR_vsx_ld_elemrev_v4sf);
14734 return altivec_expand_lv_builtin (code, exp, target, false);
14736 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14738 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14739 : CODE_FOR_vsx_ld_elemrev_v4si);
14740 return altivec_expand_lv_builtin (code, exp, target, false);
14742 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14744 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14745 : CODE_FOR_vsx_ld_elemrev_v8hi);
14746 return altivec_expand_lv_builtin (code, exp, target, false);
14748 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14750 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14751 : CODE_FOR_vsx_ld_elemrev_v16qi);
14752 return altivec_expand_lv_builtin (code, exp, target, false);
14754 break;
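/* Illustrative note for the LD_ELEMREV cases above: on big-endian,
any unaligned-supporting load leaves the elements in array order, so
the generic vsx_load_* patterns suffice; on little-endian only the
element-reversing vsx_ld_elemrev_* patterns guarantee that lane i of
the result is element i in memory. */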
14755 default:
14756 break;
14757 /* Fall through. */
14760 *expandedp = false;
14761 return NULL_RTX;
14764 /* Check whether a builtin function is supported in this target
14765 configuration. */
14766 bool
14767 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14769 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
14770 if ((fnmask & rs6000_builtin_mask) != fnmask)
14771 return false;
14772 else
14773 return true;
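/* Worked example of the mask test above (masks illustrative): if a
builtin's entry carries RS6000_BTM_ALTIVEC | RS6000_BTM_P8_VECTOR but
only RS6000_BTM_ALTIVEC is set in rs6000_builtin_mask, the AND keeps
just the ALTIVEC bit, which differs from fnmask, so the builtin is
reported as unsupported. */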
14776 /* Raise an error message for a builtin function that is called without the
14777 appropriate target options being set. */
14779 static void
14780 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14782 size_t uns_fncode = (size_t) fncode;
14783 const char *name = rs6000_builtin_info[uns_fncode].name;
14784 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14786 gcc_assert (name != NULL);
14787 if ((fnmask & RS6000_BTM_CELL) != 0)
14788 error ("builtin function %qs is only valid for the cell processor", name);
14789 else if ((fnmask & RS6000_BTM_VSX) != 0)
14790 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14791 else if ((fnmask & RS6000_BTM_HTM) != 0)
14792 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14793 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14794 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14795 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14796 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14797 error ("builtin function %qs requires the %qs and %qs options",
14798 name, "-mhard-dfp", "-mpower8-vector");
14799 else if ((fnmask & RS6000_BTM_DFP) != 0)
14800 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14801 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14802 error ("builtin function %qs requires the %qs option", name,
14803 "-mpower8-vector");
14804 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14805 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14806 error ("builtin function %qs requires the %qs and %qs options",
14807 name, "-mcpu=power9", "-m64");
14808 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14809 error ("builtin function %qs requires the %qs option", name,
14810 "-mcpu=power9");
14811 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14812 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14813 error ("builtin function %qs requires the %qs and %qs options",
14814 name, "-mcpu=power9", "-m64");
14815 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14816 error ("builtin function %qs requires the %qs option", name,
14817 "-mcpu=power9");
14818 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14820 if (!TARGET_HARD_FLOAT)
14821 error ("builtin function %qs requires the %qs option", name,
14822 "-mhard-float");
14823 else
14824 error ("builtin function %qs requires the %qs option", name,
14825 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14827 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14828 error ("builtin function %qs requires the %qs option", name,
14829 "-mhard-float");
14830 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14831 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14832 name);
14833 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14834 error ("builtin function %qs requires the %qs option", name,
14835 "%<-mfloat128%>");
14836 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14837 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14838 error ("builtin function %qs requires the %qs (or newer), and "
14839 "%qs or %qs options",
14840 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14841 else
14842 error ("builtin function %qs is not supported with the current options",
14843 name);
14846 /* Target hook for early folding of built-ins, shamelessly stolen
14847 from ia64.c. */
14849 static tree
14850 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14851 int n_args ATTRIBUTE_UNUSED,
14852 tree *args ATTRIBUTE_UNUSED,
14853 bool ignore ATTRIBUTE_UNUSED)
14855 #ifdef SUBTARGET_FOLD_BUILTIN
14856 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14857 #else
14858 return NULL_TREE;
14859 #endif
14862 /* Helper function to sort out which built-ins may be valid without having
14863 a LHS. */
14864 static bool
14865 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14867 switch (fn_code)
14869 case ALTIVEC_BUILTIN_STVX_V16QI:
14870 case ALTIVEC_BUILTIN_STVX_V8HI:
14871 case ALTIVEC_BUILTIN_STVX_V4SI:
14872 case ALTIVEC_BUILTIN_STVX_V4SF:
14873 case ALTIVEC_BUILTIN_STVX_V2DI:
14874 case ALTIVEC_BUILTIN_STVX_V2DF:
14875 case VSX_BUILTIN_STXVW4X_V16QI:
14876 case VSX_BUILTIN_STXVW4X_V8HI:
14877 case VSX_BUILTIN_STXVW4X_V4SF:
14878 case VSX_BUILTIN_STXVW4X_V4SI:
14879 case VSX_BUILTIN_STXVD2X_V2DF:
14880 case VSX_BUILTIN_STXVD2X_V2DI:
14881 return true;
14882 default:
14883 return false;
14887 /* Helper function to handle the gimple folding of a vector compare
14888 operation. This sets up true/false vectors, and uses the
14889 VEC_COND_EXPR operation.
14890 CODE indicates which comparison is to be made. (EQ, GT, ...).
14891 TYPE indicates the type of the result. */
14892 static tree
14893 fold_build_vec_cmp (tree_code code, tree type,
14894 tree arg0, tree arg1)
14896 tree cmp_type = build_same_sized_truth_vector_type (type);
14897 tree zero_vec = build_zero_cst (type);
14898 tree minus_one_vec = build_minus_one_cst (type);
14899 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14900 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
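/* Worked example (illustrative): folding an EQ compare on V4SI
builds, in GIMPLE terms,
    cmp = arg0 == arg1;
    lhs = VEC_COND_EXPR <cmp, {-1,-1,-1,-1}, {0,0,0,0}>;
so each result lane is all-ones where the compare holds and zero
elsewhere, matching the vcmpequw result convention. */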
14903 /* Helper function to handle the in-between steps for the
14904 vector compare built-ins. */
14905 static void
14906 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
14908 tree arg0 = gimple_call_arg (stmt, 0);
14909 tree arg1 = gimple_call_arg (stmt, 1);
14910 tree lhs = gimple_call_lhs (stmt);
14911 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
14912 gimple *g = gimple_build_assign (lhs, cmp);
14913 gimple_set_location (g, gimple_location (stmt));
14914 gsi_replace (gsi, g, true);
14917 /* Helper function to map V2DF and V4SF types to their
14918 integral equivalents (V2DI and V4SI). */
14919 tree map_to_integral_tree_type (tree input_tree_type)
14921 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
14922 return input_tree_type;
14923 else
14925 if (types_compatible_p (TREE_TYPE (input_tree_type),
14926 TREE_TYPE (V2DF_type_node)))
14927 return V2DI_type_node;
14928 else if (types_compatible_p (TREE_TYPE (input_tree_type),
14929 TREE_TYPE (V4SF_type_node)))
14930 return V4SI_type_node;
14931 else
14932 gcc_unreachable ();
14936 /* Helper function to handle the vector merge[hl] built-ins. The
14937 implementation difference between the h and l versions of this code is in
14938 the values used when building the permute vector for high-word versus
14939 low-word merge. The variance is keyed off the use_high parameter. */
14940 static void
14941 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
14943 tree arg0 = gimple_call_arg (stmt, 0);
14944 tree arg1 = gimple_call_arg (stmt, 1);
14945 tree lhs = gimple_call_lhs (stmt);
14946 tree lhs_type = TREE_TYPE (lhs);
14947 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14948 int midpoint = n_elts / 2;
14949 int offset = 0;
14951 if (use_high == 1)
14952 offset = midpoint;
14954 /* The permute_type will match the lhs for integral types. For double and
14955 float types, the permute type needs to map to the V2 or V4 type that
14956 matches size. */
14957 tree permute_type;
14958 permute_type = map_to_integral_tree_type (lhs_type);
14959 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
14961 for (int i = 0; i < midpoint; i++)
14963 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14964 offset + i));
14965 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14966 offset + n_elts + i));
14969 tree permute = elts.build ();
14971 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
14972 gimple_set_location (g, gimple_location (stmt));
14973 gsi_replace (gsi, g, true);
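/* Worked example (illustrative): for V4SI with use_high == 0
(vec_mergeh), midpoint is 2 and the loop pushes the permute vector
{0, 4, 1, 5}, interleaving the first halves of arg0 and arg1;
use_high == 1 (vec_mergel) starts at the midpoint and pushes
{2, 6, 3, 7}. */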
14976 /* Helper function to handle the vector merge[eo] built-ins. */
14977 static void
14978 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
14980 tree arg0 = gimple_call_arg (stmt, 0);
14981 tree arg1 = gimple_call_arg (stmt, 1);
14982 tree lhs = gimple_call_lhs (stmt);
14983 tree lhs_type = TREE_TYPE (lhs);
14984 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14986 /* The permute_type will match the lhs for integral types. For double and
14987 float types, the permute type needs to map to the V2 or V4 type that
14988 matches size. */
14989 tree permute_type;
14990 permute_type = map_to_integral_tree_type (lhs_type);
14992 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
14994 /* Build the permute vector. */
14995 for (int i = 0; i < n_elts / 2; i++)
14997 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14998 2*i + use_odd));
14999 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15000 2*i + use_odd + n_elts));
15003 tree permute = elts.build ();
15005 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15006 gimple_set_location (g, gimple_location (stmt));
15007 gsi_replace (gsi, g, true);
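/* Worked example (illustrative): for V4SI, use_odd == 0 (vec_mergee)
builds the permute vector {0, 4, 2, 6}, while use_odd == 1
(vec_mergeo) builds {1, 5, 3, 7}. */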
15010 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15011 a constant, use rs6000_fold_builtin.) */
15013 bool
15014 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15016 gimple *stmt = gsi_stmt (*gsi);
15017 tree fndecl = gimple_call_fndecl (stmt);
15018 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15019 enum rs6000_builtins fn_code
15020 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15021 tree arg0, arg1, lhs, temp;
15022 enum tree_code bcode;
15023 gimple *g;
15025 size_t uns_fncode = (size_t) fn_code;
15026 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15027 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15028 const char *fn_name2 = (icode != CODE_FOR_nothing)
15029 ? get_insn_name ((int) icode)
15030 : "nothing";
15032 if (TARGET_DEBUG_BUILTIN)
15033 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15034 fn_code, fn_name1, fn_name2);
15036 if (!rs6000_fold_gimple)
15037 return false;
15039 /* Prevent gimple folding for code that does not have a LHS, unless it is
15040 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15041 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15042 return false;
15044 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15045 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15046 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15047 if (!func_valid_p)
15048 return false;
15050 switch (fn_code)
15052 /* Flavors of vec_add. We deliberately don't expand
15053 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15054 TImode, resulting in much poorer code generation. */
15055 case ALTIVEC_BUILTIN_VADDUBM:
15056 case ALTIVEC_BUILTIN_VADDUHM:
15057 case ALTIVEC_BUILTIN_VADDUWM:
15058 case P8V_BUILTIN_VADDUDM:
15059 case ALTIVEC_BUILTIN_VADDFP:
15060 case VSX_BUILTIN_XVADDDP:
15061 bcode = PLUS_EXPR;
15062 do_binary:
15063 arg0 = gimple_call_arg (stmt, 0);
15064 arg1 = gimple_call_arg (stmt, 1);
15065 lhs = gimple_call_lhs (stmt);
15066 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15067 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15069 /* Ensure the binary operation is performed in a type
15070 that wraps if it is an integral type. */
15071 gimple_seq stmts = NULL;
15072 tree type = unsigned_type_for (TREE_TYPE (lhs));
15073 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15074 type, arg0);
15075 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15076 type, arg1);
15077 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15078 type, uarg0, uarg1);
15079 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15080 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15081 build1 (VIEW_CONVERT_EXPR,
15082 TREE_TYPE (lhs), res));
15083 gsi_replace (gsi, g, true);
15084 return true;
15086 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15087 gimple_set_location (g, gimple_location (stmt));
15088 gsi_replace (gsi, g, true);
15089 return true;
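/* Illustrative sketch of the signed case above: vec_add on vector
signed int must wrap modulo 2^32, but signed overflow is undefined
for PLUS_EXPR, so the fold emits
    u0 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
    u1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
    res = u0 + u1;
    lhs = VIEW_CONVERT_EXPR <vector signed int> (res);
where the unsigned addition is well-defined wrapping arithmetic. */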
15090 /* Flavors of vec_sub. We deliberately don't expand
15091 P8V_BUILTIN_VSUBUQM. */
15092 case ALTIVEC_BUILTIN_VSUBUBM:
15093 case ALTIVEC_BUILTIN_VSUBUHM:
15094 case ALTIVEC_BUILTIN_VSUBUWM:
15095 case P8V_BUILTIN_VSUBUDM:
15096 case ALTIVEC_BUILTIN_VSUBFP:
15097 case VSX_BUILTIN_XVSUBDP:
15098 bcode = MINUS_EXPR;
15099 goto do_binary;
15100 case VSX_BUILTIN_XVMULSP:
15101 case VSX_BUILTIN_XVMULDP:
15102 arg0 = gimple_call_arg (stmt, 0);
15103 arg1 = gimple_call_arg (stmt, 1);
15104 lhs = gimple_call_lhs (stmt);
15105 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15106 gimple_set_location (g, gimple_location (stmt));
15107 gsi_replace (gsi, g, true);
15108 return true;
15109 /* Even element flavors of vec_mul (signed). */
15110 case ALTIVEC_BUILTIN_VMULESB:
15111 case ALTIVEC_BUILTIN_VMULESH:
15112 case P8V_BUILTIN_VMULESW:
15113 /* Even element flavors of vec_mul (unsigned). */
15114 case ALTIVEC_BUILTIN_VMULEUB:
15115 case ALTIVEC_BUILTIN_VMULEUH:
15116 case P8V_BUILTIN_VMULEUW:
15117 arg0 = gimple_call_arg (stmt, 0);
15118 arg1 = gimple_call_arg (stmt, 1);
15119 lhs = gimple_call_lhs (stmt);
15120 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15121 gimple_set_location (g, gimple_location (stmt));
15122 gsi_replace (gsi, g, true);
15123 return true;
15124 /* Odd element flavors of vec_mul (signed). */
15125 case ALTIVEC_BUILTIN_VMULOSB:
15126 case ALTIVEC_BUILTIN_VMULOSH:
15127 case P8V_BUILTIN_VMULOSW:
15128 /* Odd element flavors of vec_mul (unsigned). */
15129 case ALTIVEC_BUILTIN_VMULOUB:
15130 case ALTIVEC_BUILTIN_VMULOUH:
15131 case P8V_BUILTIN_VMULOUW:
15132 arg0 = gimple_call_arg (stmt, 0);
15133 arg1 = gimple_call_arg (stmt, 1);
15134 lhs = gimple_call_lhs (stmt);
15135 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15136 gimple_set_location (g, gimple_location (stmt));
15137 gsi_replace (gsi, g, true);
15138 return true;
15139 /* Flavors of vec_div (Integer). */
15140 case VSX_BUILTIN_DIV_V2DI:
15141 case VSX_BUILTIN_UDIV_V2DI:
15142 arg0 = gimple_call_arg (stmt, 0);
15143 arg1 = gimple_call_arg (stmt, 1);
15144 lhs = gimple_call_lhs (stmt);
15145 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15146 gimple_set_location (g, gimple_location (stmt));
15147 gsi_replace (gsi, g, true);
15148 return true;
15149 /* Flavors of vec_div (Float). */
15150 case VSX_BUILTIN_XVDIVSP:
15151 case VSX_BUILTIN_XVDIVDP:
15152 arg0 = gimple_call_arg (stmt, 0);
15153 arg1 = gimple_call_arg (stmt, 1);
15154 lhs = gimple_call_lhs (stmt);
15155 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15156 gimple_set_location (g, gimple_location (stmt));
15157 gsi_replace (gsi, g, true);
15158 return true;
15159 /* Flavors of vec_and. */
15160 case ALTIVEC_BUILTIN_VAND:
15161 arg0 = gimple_call_arg (stmt, 0);
15162 arg1 = gimple_call_arg (stmt, 1);
15163 lhs = gimple_call_lhs (stmt);
15164 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15165 gimple_set_location (g, gimple_location (stmt));
15166 gsi_replace (gsi, g, true);
15167 return true;
15168 /* Flavors of vec_andc. */
15169 case ALTIVEC_BUILTIN_VANDC:
15170 arg0 = gimple_call_arg (stmt, 0);
15171 arg1 = gimple_call_arg (stmt, 1);
15172 lhs = gimple_call_lhs (stmt);
15173 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15174 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15175 gimple_set_location (g, gimple_location (stmt));
15176 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15177 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15178 gimple_set_location (g, gimple_location (stmt));
15179 gsi_replace (gsi, g, true);
15180 return true;
15181 /* Flavors of vec_nand. */
15182 case P8V_BUILTIN_VEC_NAND:
15183 case P8V_BUILTIN_NAND_V16QI:
15184 case P8V_BUILTIN_NAND_V8HI:
15185 case P8V_BUILTIN_NAND_V4SI:
15186 case P8V_BUILTIN_NAND_V4SF:
15187 case P8V_BUILTIN_NAND_V2DF:
15188 case P8V_BUILTIN_NAND_V2DI:
15189 arg0 = gimple_call_arg (stmt, 0);
15190 arg1 = gimple_call_arg (stmt, 1);
15191 lhs = gimple_call_lhs (stmt);
15192 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15193 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15194 gimple_set_location (g, gimple_location (stmt));
15195 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15196 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15197 gimple_set_location (g, gimple_location (stmt));
15198 gsi_replace (gsi, g, true);
15199 return true;
15200 /* Flavors of vec_or. */
15201 case ALTIVEC_BUILTIN_VOR:
15202 arg0 = gimple_call_arg (stmt, 0);
15203 arg1 = gimple_call_arg (stmt, 1);
15204 lhs = gimple_call_lhs (stmt);
15205 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15206 gimple_set_location (g, gimple_location (stmt));
15207 gsi_replace (gsi, g, true);
15208 return true;
15209 /* flavors of vec_orc. */
15210 case P8V_BUILTIN_ORC_V16QI:
15211 case P8V_BUILTIN_ORC_V8HI:
15212 case P8V_BUILTIN_ORC_V4SI:
15213 case P8V_BUILTIN_ORC_V4SF:
15214 case P8V_BUILTIN_ORC_V2DF:
15215 case P8V_BUILTIN_ORC_V2DI:
15216 arg0 = gimple_call_arg (stmt, 0);
15217 arg1 = gimple_call_arg (stmt, 1);
15218 lhs = gimple_call_lhs (stmt);
15219 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15220 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15221 gimple_set_location (g, gimple_location (stmt));
15222 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15223 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15224 gimple_set_location (g, gimple_location (stmt));
15225 gsi_replace (gsi, g, true);
15226 return true;
15227 /* Flavors of vec_xor. */
15228 case ALTIVEC_BUILTIN_VXOR:
15229 arg0 = gimple_call_arg (stmt, 0);
15230 arg1 = gimple_call_arg (stmt, 1);
15231 lhs = gimple_call_lhs (stmt);
15232 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15233 gimple_set_location (g, gimple_location (stmt));
15234 gsi_replace (gsi, g, true);
15235 return true;
15236 /* Flavors of vec_nor. */
15237 case ALTIVEC_BUILTIN_VNOR:
15238 arg0 = gimple_call_arg (stmt, 0);
15239 arg1 = gimple_call_arg (stmt, 1);
15240 lhs = gimple_call_lhs (stmt);
15241 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15242 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15243 gimple_set_location (g, gimple_location (stmt));
15244 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15245 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15246 gimple_set_location (g, gimple_location (stmt));
15247 gsi_replace (gsi, g, true);
15248 return true;
15249 /* flavors of vec_abs. */
15250 case ALTIVEC_BUILTIN_ABS_V16QI:
15251 case ALTIVEC_BUILTIN_ABS_V8HI:
15252 case ALTIVEC_BUILTIN_ABS_V4SI:
15253 case ALTIVEC_BUILTIN_ABS_V4SF:
15254 case P8V_BUILTIN_ABS_V2DI:
15255 case VSX_BUILTIN_XVABSDP:
15256 arg0 = gimple_call_arg (stmt, 0);
15257 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15258 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15259 return false;
15260 lhs = gimple_call_lhs (stmt);
15261 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15262 gimple_set_location (g, gimple_location (stmt));
15263 gsi_replace (gsi, g, true);
15264 return true;
15265 /* flavors of vec_min. */
15266 case VSX_BUILTIN_XVMINDP:
15267 case P8V_BUILTIN_VMINSD:
15268 case P8V_BUILTIN_VMINUD:
15269 case ALTIVEC_BUILTIN_VMINSB:
15270 case ALTIVEC_BUILTIN_VMINSH:
15271 case ALTIVEC_BUILTIN_VMINSW:
15272 case ALTIVEC_BUILTIN_VMINUB:
15273 case ALTIVEC_BUILTIN_VMINUH:
15274 case ALTIVEC_BUILTIN_VMINUW:
15275 case ALTIVEC_BUILTIN_VMINFP:
15276 arg0 = gimple_call_arg (stmt, 0);
15277 arg1 = gimple_call_arg (stmt, 1);
15278 lhs = gimple_call_lhs (stmt);
15279 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15280 gimple_set_location (g, gimple_location (stmt));
15281 gsi_replace (gsi, g, true);
15282 return true;
15283 /* flavors of vec_max. */
15284 case VSX_BUILTIN_XVMAXDP:
15285 case P8V_BUILTIN_VMAXSD:
15286 case P8V_BUILTIN_VMAXUD:
15287 case ALTIVEC_BUILTIN_VMAXSB:
15288 case ALTIVEC_BUILTIN_VMAXSH:
15289 case ALTIVEC_BUILTIN_VMAXSW:
15290 case ALTIVEC_BUILTIN_VMAXUB:
15291 case ALTIVEC_BUILTIN_VMAXUH:
15292 case ALTIVEC_BUILTIN_VMAXUW:
15293 case ALTIVEC_BUILTIN_VMAXFP:
15294 arg0 = gimple_call_arg (stmt, 0);
15295 arg1 = gimple_call_arg (stmt, 1);
15296 lhs = gimple_call_lhs (stmt);
15297 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15298 gimple_set_location (g, gimple_location (stmt));
15299 gsi_replace (gsi, g, true);
15300 return true;
15301 /* Flavors of vec_eqv. */
15302 case P8V_BUILTIN_EQV_V16QI:
15303 case P8V_BUILTIN_EQV_V8HI:
15304 case P8V_BUILTIN_EQV_V4SI:
15305 case P8V_BUILTIN_EQV_V4SF:
15306 case P8V_BUILTIN_EQV_V2DF:
15307 case P8V_BUILTIN_EQV_V2DI:
15308 arg0 = gimple_call_arg (stmt, 0);
15309 arg1 = gimple_call_arg (stmt, 1);
15310 lhs = gimple_call_lhs (stmt);
15311 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15312 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15313 gimple_set_location (g, gimple_location (stmt));
15314 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15315 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15316 gimple_set_location (g, gimple_location (stmt));
15317 gsi_replace (gsi, g, true);
15318 return true;
15319 /* Flavors of vec_rotate_left. */
15320 case ALTIVEC_BUILTIN_VRLB:
15321 case ALTIVEC_BUILTIN_VRLH:
15322 case ALTIVEC_BUILTIN_VRLW:
15323 case P8V_BUILTIN_VRLD:
15324 arg0 = gimple_call_arg (stmt, 0);
15325 arg1 = gimple_call_arg (stmt, 1);
15326 lhs = gimple_call_lhs (stmt);
15327 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15328 gimple_set_location (g, gimple_location (stmt));
15329 gsi_replace (gsi, g, true);
15330 return true;
15331 /* Flavors of vector shift right algebraic.
15332 vec_sra{b,h,w} -> vsra{b,h,w}. */
15333 case ALTIVEC_BUILTIN_VSRAB:
15334 case ALTIVEC_BUILTIN_VSRAH:
15335 case ALTIVEC_BUILTIN_VSRAW:
15336 case P8V_BUILTIN_VSRAD:
15338 arg0 = gimple_call_arg (stmt, 0);
15339 arg1 = gimple_call_arg (stmt, 1);
15340 lhs = gimple_call_lhs (stmt);
15341 tree arg1_type = TREE_TYPE (arg1);
15342 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15343 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15344 location_t loc = gimple_location (stmt);
15345 /* Force arg1 into the valid range for the arg0 type. */
15346 /* Build a vector consisting of the max valid bit-size values. */
15347 int n_elts = VECTOR_CST_NELTS (arg1);
15348 tree element_size = build_int_cst (unsigned_element_type,
15349 128 / n_elts);
15350 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15351 for (int i = 0; i < n_elts; i++)
15352 elts.safe_push (element_size);
15353 tree modulo_tree = elts.build ();
15354 /* Modulo the provided shift value against that vector. */
15355 gimple_seq stmts = NULL;
15356 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15357 unsigned_arg1_type, arg1);
15358 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15359 unsigned_arg1_type, unsigned_arg1,
15360 modulo_tree);
15361 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15362 /* And finally, do the shift. */
15363 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15364 gimple_set_location (g, loc);
15365 gsi_replace (gsi, g, true);
15366 return true;
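/* Worked example (illustrative): for vsraw on V4SI, n_elts is 4, so
the modulo vector is {32, 32, 32, 32} (128 / 4) and a lane shift
count of 40 is reduced to 40 % 32 == 8 before the RSHIFT_EXPR,
matching the hardware's use of only the low-order shift bits. */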
15368 /* Flavors of vector shift left.
15369 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15370 case ALTIVEC_BUILTIN_VSLB:
15371 case ALTIVEC_BUILTIN_VSLH:
15372 case ALTIVEC_BUILTIN_VSLW:
15373 case P8V_BUILTIN_VSLD:
15375 location_t loc;
15376 gimple_seq stmts = NULL;
15377 arg0 = gimple_call_arg (stmt, 0);
15378 tree arg0_type = TREE_TYPE (arg0);
15379 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15380 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15381 return false;
15382 arg1 = gimple_call_arg (stmt, 1);
15383 tree arg1_type = TREE_TYPE (arg1);
15384 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15385 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15386 loc = gimple_location (stmt);
15387 lhs = gimple_call_lhs (stmt);
15388 /* Force arg1 into the valid range for the arg0 type. */
15389 /* Build a vector consisting of the max valid bit-size values. */
15390 int n_elts = VECTOR_CST_NELTS (arg1);
15391 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15392 * BITS_PER_UNIT;
15393 tree element_size = build_int_cst (unsigned_element_type,
15394 tree_size_in_bits / n_elts);
15395 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15396 for (int i = 0; i < n_elts; i++)
15397 elts.safe_push (element_size);
15398 tree modulo_tree = elts.build ();
15399 /* Modulo the provided shift value against that vector. */
15400 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15401 unsigned_arg1_type, arg1);
15402 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15403 unsigned_arg1_type, unsigned_arg1,
15404 modulo_tree);
15405 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15406 /* And finally, do the shift. */
15407 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15408 gimple_set_location (g, gimple_location (stmt));
15409 gsi_replace (gsi, g, true);
15410 return true;
15412 /* Flavors of vector shift right. */
15413 case ALTIVEC_BUILTIN_VSRB:
15414 case ALTIVEC_BUILTIN_VSRH:
15415 case ALTIVEC_BUILTIN_VSRW:
15416 case P8V_BUILTIN_VSRD:
15418 arg0 = gimple_call_arg (stmt, 0);
15419 arg1 = gimple_call_arg (stmt, 1);
15420 lhs = gimple_call_lhs (stmt);
15421 tree arg1_type = TREE_TYPE (arg1);
15422 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15423 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15424 location_t loc = gimple_location (stmt);
15425 gimple_seq stmts = NULL;
15426 /* Convert arg0 to unsigned. */
15427 tree arg0_unsigned
15428 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15429 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15430 /* Force arg1 into the valid range for the arg0 type. */
15431 /* Build a vector consisting of the max valid bit-size values. */
15432 int n_elts = VECTOR_CST_NELTS (arg1);
15433 tree element_size = build_int_cst (unsigned_element_type,
15434 128 / n_elts);
15435 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15436 for (int i = 0; i < n_elts; i++)
15437 elts.safe_push (element_size);
15438 tree modulo_tree = elts.build ();
15439 /* Modulo the provided shift value against that vector. */
15440 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15441 unsigned_arg1_type, arg1);
15442 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15443 unsigned_arg1_type, unsigned_arg1,
15444 modulo_tree);
15445 /* Do the shift. */
15446 tree res
15447 = gimple_build (&stmts, RSHIFT_EXPR,
15448 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15449 /* Convert result back to the lhs type. */
15450 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15451 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15452 update_call_from_tree (gsi, res);
15453 return true;
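/* Illustrative: vsr{b,h,w,d} are logical shifts, so arg0 is first
view-converted to its unsigned vector type; RSHIFT_EXPR on an
unsigned operand shifts in zeros, and the result is view-converted
back to the (possibly signed) lhs type. */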
15455 /* Vector loads. */
15456 case ALTIVEC_BUILTIN_LVX_V16QI:
15457 case ALTIVEC_BUILTIN_LVX_V8HI:
15458 case ALTIVEC_BUILTIN_LVX_V4SI:
15459 case ALTIVEC_BUILTIN_LVX_V4SF:
15460 case ALTIVEC_BUILTIN_LVX_V2DI:
15461 case ALTIVEC_BUILTIN_LVX_V2DF:
15462 case ALTIVEC_BUILTIN_LVX_V1TI:
15464 arg0 = gimple_call_arg (stmt, 0); // offset
15465 arg1 = gimple_call_arg (stmt, 1); // address
15466 lhs = gimple_call_lhs (stmt);
15467 location_t loc = gimple_location (stmt);
15468 /* Since arg1 may be cast to a different type, just use ptr_type_node
15469 here instead of trying to enforce TBAA on pointer types. */
15470 tree arg1_type = ptr_type_node;
15471 tree lhs_type = TREE_TYPE (lhs);
15472 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15473 the tree using the value from arg0. The resulting type will match
15474 the type of arg1. */
15475 gimple_seq stmts = NULL;
15476 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15477 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15478 arg1_type, arg1, temp_offset);
15479 /* Mask off any lower bits from the address. */
15480 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15481 arg1_type, temp_addr,
15482 build_int_cst (arg1_type, -16));
15483 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15484 if (!is_gimple_mem_ref_addr (aligned_addr))
15486 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15487 gimple *g = gimple_build_assign (t, aligned_addr);
15488 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15489 aligned_addr = t;
15491 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15492 take an offset, but since we've already incorporated the offset
15493 above, here we just pass in a zero. */
15494 gimple *g
15495 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15496 build_int_cst (arg1_type, 0)));
15497 gimple_set_location (g, loc);
15498 gsi_replace (gsi, g, true);
15499 return true;
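/* Worked example of the sequence above (illustrative):
vec_ld (off, p) becomes roughly
    addr = p + (sizetype) off;
    aligned = addr & -16;
    lhs = MEM[(vector ...) aligned];
where the AND clears the low four address bits, mirroring lvx's
implicit truncation to a 16-byte boundary. */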
15501 /* Vector stores. */
15502 case ALTIVEC_BUILTIN_STVX_V16QI:
15503 case ALTIVEC_BUILTIN_STVX_V8HI:
15504 case ALTIVEC_BUILTIN_STVX_V4SI:
15505 case ALTIVEC_BUILTIN_STVX_V4SF:
15506 case ALTIVEC_BUILTIN_STVX_V2DI:
15507 case ALTIVEC_BUILTIN_STVX_V2DF:
15509 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15510 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15511 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15512 location_t loc = gimple_location (stmt);
15513 tree arg0_type = TREE_TYPE (arg0);
15514 /* Use ptr_type_node (no TBAA) for the arg2_type.
15515 FIXME: (Richard) "A proper fix would be to transition this type as
15516 seen from the frontend to GIMPLE, for example in a similar way we
15517 do for MEM_REFs by piggy-backing that on an extra argument, a
15518 constant zero pointer of the alias pointer type to use (which would
15519 also serve as a type indicator of the store itself). I'd use a
15520 target specific internal function for this (not sure if we can have
15521 those target specific, but I guess if it's folded away then that's
15522 fine) and get away with the overload set." */
15523 tree arg2_type = ptr_type_node;
15524 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15525 the tree using the value from arg0. The resulting type will match
15526 the type of arg2. */
15527 gimple_seq stmts = NULL;
15528 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15529 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15530 arg2_type, arg2, temp_offset);
15531 /* Mask off any lower bits from the address. */
15532 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15533 arg2_type, temp_addr,
15534 build_int_cst (arg2_type, -16));
15535 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15536 if (!is_gimple_mem_ref_addr (aligned_addr))
15538 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15539 gimple *g = gimple_build_assign (t, aligned_addr);
15540 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15541 aligned_addr = t;
15543 /* The desired gimple result should be similar to:
15544 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15545 gimple *g
15546 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15547 build_int_cst (arg2_type, 0)), arg0);
15548 gimple_set_location (g, loc);
15549 gsi_replace (gsi, g, true);
15550 return true;
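/* The store side mirrors the load fold (illustrative):
vec_st (v, off, p) becomes
    MEM[(vector ...)((p + off) & -16)] = v; */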
15553 /* Unaligned vector loads. */
15554 case VSX_BUILTIN_LXVW4X_V16QI:
15555 case VSX_BUILTIN_LXVW4X_V8HI:
15556 case VSX_BUILTIN_LXVW4X_V4SF:
15557 case VSX_BUILTIN_LXVW4X_V4SI:
15558 case VSX_BUILTIN_LXVD2X_V2DF:
15559 case VSX_BUILTIN_LXVD2X_V2DI:
15561 arg0 = gimple_call_arg (stmt, 0); // offset
15562 arg1 = gimple_call_arg (stmt, 1); // address
15563 lhs = gimple_call_lhs (stmt);
15564 location_t loc = gimple_location (stmt);
15565 /* Since arg1 may be cast to a different type, just use ptr_type_node
15566 here instead of trying to enforce TBAA on pointer types. */
15567 tree arg1_type = ptr_type_node;
15568 tree lhs_type = TREE_TYPE (lhs);
15569 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15570 required alignment (power) is 4 bytes regardless of data type. */
15571 tree align_ltype = build_aligned_type (lhs_type, 4);
15572 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15573 the tree using the value from arg0. The resulting type will match
15574 the type of arg1. */
15575 gimple_seq stmts = NULL;
15576 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15577 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15578 arg1_type, arg1, temp_offset);
15579 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15580 if (!is_gimple_mem_ref_addr (temp_addr))
15582 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15583 gimple *g = gimple_build_assign (t, temp_addr);
15584 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15585 temp_addr = t;
15587 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15588 take an offset, but since we've already incorporated the offset
15589 above, here we just pass in a zero. */
15590 gimple *g;
15591 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15592 build_int_cst (arg1_type, 0)));
15593 gimple_set_location (g, loc);
15594 gsi_replace (gsi, g, true);
15595 return true;
15598 /* Unaligned vector stores. */
15599 case VSX_BUILTIN_STXVW4X_V16QI:
15600 case VSX_BUILTIN_STXVW4X_V8HI:
15601 case VSX_BUILTIN_STXVW4X_V4SF:
15602 case VSX_BUILTIN_STXVW4X_V4SI:
15603 case VSX_BUILTIN_STXVD2X_V2DF:
15604 case VSX_BUILTIN_STXVD2X_V2DI:
15606 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15607 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15608 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15609 location_t loc = gimple_location (stmt);
15610 tree arg0_type = TREE_TYPE (arg0);
15611 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15612 tree arg2_type = ptr_type_node;
15613 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15614 required alignment (power) is 4 bytes regardless of data type. */
15615 tree align_stype = build_aligned_type (arg0_type, 4);
15616 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15617 the tree using the value from arg1. */
15618 gimple_seq stmts = NULL;
15619 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15620 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15621 arg2_type, arg2, temp_offset);
15622 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15623 if (!is_gimple_mem_ref_addr (temp_addr))
15625 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15626 gimple *g = gimple_build_assign (t, temp_addr);
15627 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15628 temp_addr = t;
15630 gimple *g;
15631 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15632 build_int_cst (arg2_type, 0)), arg0);
15633 gimple_set_location (g, loc);
15634 gsi_replace (gsi, g, true);
15635 return true;
15638 /* Vector Fused multiply-add (fma). */
15639 case ALTIVEC_BUILTIN_VMADDFP:
15640 case VSX_BUILTIN_XVMADDDP:
15641 case ALTIVEC_BUILTIN_VMLADDUHM:
15643 arg0 = gimple_call_arg (stmt, 0);
15644 arg1 = gimple_call_arg (stmt, 1);
15645 tree arg2 = gimple_call_arg (stmt, 2);
15646 lhs = gimple_call_lhs (stmt);
15647 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15648 gimple_call_set_lhs (g, lhs);
15649 gimple_call_set_nothrow (g, true);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_replace (gsi, g, true);
15652 return true;
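/* Illustrative: vec_madd (a, b, c) is replaced by the internal call
    lhs = .FMA (a, b, c);
(as printed in GIMPLE dumps), which the expanders later turn into a
fused multiply-add such as vmaddfp/xvmadddp, or vmladduhm for the
integer V8HI variant. */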
15655 /* Vector compares; EQ, NE, GE, GT, LE. */
15656 case ALTIVEC_BUILTIN_VCMPEQUB:
15657 case ALTIVEC_BUILTIN_VCMPEQUH:
15658 case ALTIVEC_BUILTIN_VCMPEQUW:
15659 case P8V_BUILTIN_VCMPEQUD:
15660 fold_compare_helper (gsi, EQ_EXPR, stmt);
15661 return true;
15663 case P9V_BUILTIN_CMPNEB:
15664 case P9V_BUILTIN_CMPNEH:
15665 case P9V_BUILTIN_CMPNEW:
15666 fold_compare_helper (gsi, NE_EXPR, stmt);
15667 return true;
15669 case VSX_BUILTIN_CMPGE_16QI:
15670 case VSX_BUILTIN_CMPGE_U16QI:
15671 case VSX_BUILTIN_CMPGE_8HI:
15672 case VSX_BUILTIN_CMPGE_U8HI:
15673 case VSX_BUILTIN_CMPGE_4SI:
15674 case VSX_BUILTIN_CMPGE_U4SI:
15675 case VSX_BUILTIN_CMPGE_2DI:
15676 case VSX_BUILTIN_CMPGE_U2DI:
15677 fold_compare_helper (gsi, GE_EXPR, stmt);
15678 return true;
15680 case ALTIVEC_BUILTIN_VCMPGTSB:
15681 case ALTIVEC_BUILTIN_VCMPGTUB:
15682 case ALTIVEC_BUILTIN_VCMPGTSH:
15683 case ALTIVEC_BUILTIN_VCMPGTUH:
15684 case ALTIVEC_BUILTIN_VCMPGTSW:
15685 case ALTIVEC_BUILTIN_VCMPGTUW:
15686 case P8V_BUILTIN_VCMPGTUD:
15687 case P8V_BUILTIN_VCMPGTSD:
15688 fold_compare_helper (gsi, GT_EXPR, stmt);
15689 return true;
15691 case VSX_BUILTIN_CMPLE_16QI:
15692 case VSX_BUILTIN_CMPLE_U16QI:
15693 case VSX_BUILTIN_CMPLE_8HI:
15694 case VSX_BUILTIN_CMPLE_U8HI:
15695 case VSX_BUILTIN_CMPLE_4SI:
15696 case VSX_BUILTIN_CMPLE_U4SI:
15697 case VSX_BUILTIN_CMPLE_2DI:
15698 case VSX_BUILTIN_CMPLE_U2DI:
15699 fold_compare_helper (gsi, LE_EXPR, stmt);
15700 return true;
15702 /* flavors of vec_splat_[us]{8,16,32}. */
15703 case ALTIVEC_BUILTIN_VSPLTISB:
15704 case ALTIVEC_BUILTIN_VSPLTISH:
15705 case ALTIVEC_BUILTIN_VSPLTISW:
15707 arg0 = gimple_call_arg (stmt, 0);
15708 lhs = gimple_call_lhs (stmt);
15710 /* Only fold the vec_splat_*() if the low bits of arg0 form a
15711 5-bit signed constant in the range -16 to +15. */
15712 if (TREE_CODE (arg0) != INTEGER_CST
15713 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15714 return false;
15715 gimple_seq stmts = NULL;
15716 location_t loc = gimple_location (stmt);
15717 tree splat_value = gimple_convert (&stmts, loc,
15718 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15719 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15720 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15721 g = gimple_build_assign (lhs, splat_tree);
15722 gimple_set_location (g, gimple_location (stmt));
15723 gsi_replace (gsi, g, true);
15724 return true;
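/* Worked example (illustrative): vec_splat_s16 (-5) folds directly
to the constant vector {-5, -5, ..., -5}; a non-literal argument, or
one outside -16..15 such as 20, is left for the expander to handle. */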
15727 /* Flavors of vec_splat. */
15728 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15729 case ALTIVEC_BUILTIN_VSPLTB:
15730 case ALTIVEC_BUILTIN_VSPLTH:
15731 case ALTIVEC_BUILTIN_VSPLTW:
15732 case VSX_BUILTIN_XXSPLTD_V2DI:
15733 case VSX_BUILTIN_XXSPLTD_V2DF:
15735 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15736 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15737 /* Only fold the vec_splat_*() if arg1 is both a constant value and
15738 a valid index into the arg0 vector. */
15739 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15740 if (TREE_CODE (arg1) != INTEGER_CST
15741 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15742 return false;
15743 lhs = gimple_call_lhs (stmt);
15744 tree lhs_type = TREE_TYPE (lhs);
15745 tree arg0_type = TREE_TYPE (arg0);
15746 tree splat;
15747 if (TREE_CODE (arg0) == VECTOR_CST)
15748 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15749 else
15751 /* Determine (in bits) the length and start location of the
15752 splat value for a call to the tree_vec_extract helper. */
15753 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15754 * BITS_PER_UNIT / n_elts;
15755 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15756 tree len = build_int_cst (bitsizetype, splat_elem_size);
15757 tree start = build_int_cst (bitsizetype, splat_start_bit);
15758 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15759 len, start);
15761 /* And finally, build the new vector. */
15762 tree splat_tree = build_vector_from_val (lhs_type, splat);
15763 g = gimple_build_assign (lhs, splat_tree);
15764 gimple_set_location (g, gimple_location (stmt));
15765 gsi_replace (gsi, g, true);
15766 return true;
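/* Worked example for the non-constant path above (illustrative):
for a V8HI argument, splat_elem_size is 128 / 8 == 16 bits, so index
3 extracts the 16-bit field starting at bit 48 and
build_vector_from_val broadcasts it to all eight lanes. */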
15769 /* vec_mergel (integrals). */
15770 case ALTIVEC_BUILTIN_VMRGLH:
15771 case ALTIVEC_BUILTIN_VMRGLW:
15772 case VSX_BUILTIN_XXMRGLW_4SI:
15773 case ALTIVEC_BUILTIN_VMRGLB:
15774 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15775 case VSX_BUILTIN_XXMRGLW_4SF:
15776 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15777 fold_mergehl_helper (gsi, stmt, 1);
15778 return true;
15779 /* vec_mergeh (integrals). */
15780 case ALTIVEC_BUILTIN_VMRGHH:
15781 case ALTIVEC_BUILTIN_VMRGHW:
15782 case VSX_BUILTIN_XXMRGHW_4SI:
15783 case ALTIVEC_BUILTIN_VMRGHB:
15784 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15785 case VSX_BUILTIN_XXMRGHW_4SF:
15786 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15787 fold_mergehl_helper (gsi, stmt, 0);
15788 return true;
15790 /* Flavors of vec_mergee. */
15791 case P8V_BUILTIN_VMRGEW_V4SI:
15792 case P8V_BUILTIN_VMRGEW_V2DI:
15793 case P8V_BUILTIN_VMRGEW_V4SF:
15794 case P8V_BUILTIN_VMRGEW_V2DF:
15795 fold_mergeeo_helper (gsi, stmt, 0);
15796 return true;
15797 /* Flavors of vec_mergeo. */
15798 case P8V_BUILTIN_VMRGOW_V4SI:
15799 case P8V_BUILTIN_VMRGOW_V2DI:
15800 case P8V_BUILTIN_VMRGOW_V4SF:
15801 case P8V_BUILTIN_VMRGOW_V2DF:
15802 fold_mergeeo_helper (gsi, stmt, 1);
15803 return true;
15805 /* d = vec_pack (a, b) */
15806 case P8V_BUILTIN_VPKUDUM:
15807 case ALTIVEC_BUILTIN_VPKUHUM:
15808 case ALTIVEC_BUILTIN_VPKUWUM:
15810 arg0 = gimple_call_arg (stmt, 0);
15811 arg1 = gimple_call_arg (stmt, 1);
15812 lhs = gimple_call_lhs (stmt);
15813 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15814 gimple_set_location (g, gimple_location (stmt));
15815 gsi_replace (gsi, g, true);
15816 return true;
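/* Illustrative: d = vec_pack (a, b) on two V4SI operands becomes
    d = VEC_PACK_TRUNC_EXPR <a, b>;
yielding a V8HI result with each 32-bit lane truncated to its low 16
bits. */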
15819 /* d = vec_unpackh (a) */
15820 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
15821 in this code is sensitive to endianness, and needs to be inverted to
15822 handle both LE and BE targets. */
15823 case ALTIVEC_BUILTIN_VUPKHSB:
15824 case ALTIVEC_BUILTIN_VUPKHSH:
15825 case P8V_BUILTIN_VUPKHSW:
15827 arg0 = gimple_call_arg (stmt, 0);
15828 lhs = gimple_call_lhs (stmt);
15829 if (BYTES_BIG_ENDIAN)
15830 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15831 else
15832 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15833 gimple_set_location (g, gimple_location (stmt));
15834 gsi_replace (gsi, g, true);
15835 return true;
15837 /* d = vec_unpackl (a) */
15838 case ALTIVEC_BUILTIN_VUPKLSB:
15839 case ALTIVEC_BUILTIN_VUPKLSH:
15840 case P8V_BUILTIN_VUPKLSW:
15842 arg0 = gimple_call_arg (stmt, 0);
15843 lhs = gimple_call_lhs (stmt);
15844 if (BYTES_BIG_ENDIAN)
15845 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15846 else
15847 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15848 gimple_set_location (g, gimple_location (stmt));
15849 gsi_replace (gsi, g, true);
15850 return true;
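/* Worked example (illustrative): for V8HI -> V4SI, vec_unpackh must
widen elements 0..3 of the source. The HI/LO tree codes select
halves according to the target's lane numbering, so the half that
VEC_UNPACK_HI_EXPR picks on BE is picked by VEC_UNPACK_LO_EXPR on
LE, hence the inverted selection above. */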
15852 /* There is no gimple type corresponding to pixel, so just return. */
15853 case ALTIVEC_BUILTIN_VUPKHPX:
15854 case ALTIVEC_BUILTIN_VUPKLPX:
15855 return false;
15857 /* vec_perm. */
15858 case ALTIVEC_BUILTIN_VPERM_16QI:
15859 case ALTIVEC_BUILTIN_VPERM_8HI:
15860 case ALTIVEC_BUILTIN_VPERM_4SI:
15861 case ALTIVEC_BUILTIN_VPERM_2DI:
15862 case ALTIVEC_BUILTIN_VPERM_4SF:
15863 case ALTIVEC_BUILTIN_VPERM_2DF:
15865 arg0 = gimple_call_arg (stmt, 0);
15866 arg1 = gimple_call_arg (stmt, 1);
15867 tree permute = gimple_call_arg (stmt, 2);
15868 lhs = gimple_call_lhs (stmt);
15869 location_t loc = gimple_location (stmt);
15870 gimple_seq stmts = NULL;
15871 // Convert arg0 and arg1 to match the type of the permute
15872 // for the VEC_PERM_EXPR operation.
15873 tree permute_type = (TREE_TYPE (permute));
15874 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15875 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15876 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15877 permute_type, arg0_ptype, arg1_ptype,
15878 permute);
15879 // Convert the result back to the desired lhs type upon completion.
15880 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15881 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15882 g = gimple_build_assign (lhs, temp);
15883 gimple_set_location (g, loc);
15884 gsi_replace (gsi, g, true);
15885 return true;
15888 default:
15889 if (TARGET_DEBUG_BUILTIN)
15890 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
15891 fn_code, fn_name1, fn_name2);
15892 break;
15895 return false;
15898 /* Expand an expression EXP that calls a built-in function,
15899 with result going to TARGET if that's convenient
15900 (and in mode MODE if that's convenient).
15901 SUBTARGET may be used as the target for computing one of EXP's operands.
15902 IGNORE is nonzero if the value is to be ignored. */
15904 static rtx
15905 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15906 machine_mode mode ATTRIBUTE_UNUSED,
15907 int ignore ATTRIBUTE_UNUSED)
15909 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15910 enum rs6000_builtins fcode
15911 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15912 size_t uns_fcode = (size_t)fcode;
15913 const struct builtin_description *d;
15914 size_t i;
15915 rtx ret;
15916 bool success;
15917 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15918 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15919 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15921 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15922 floating point type, depending on whether long double is the IBM extended
15923 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
15924 we only define one variant of the built-in function, and switch the code
15925 when defining it, rather than defining two built-ins and using the
15926 overload table in rs6000-c.c to switch between the two. If we don't have
15927 the proper assembler, don't do this switch because CODE_FOR_*kf* and
15928 CODE_FOR_*tf* will be CODE_FOR_nothing. */
15929 if (FLOAT128_IEEE_P (TFmode))
15930 switch (icode)
15932 default:
15933 break;
15935 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
15936 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
15937 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
15938 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
15939 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
15940 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
15941 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
15942 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
15943 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
15944 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
15945 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
15946 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
15947 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
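/* Illustrative: when long double is IEEE 128-bit (for example under
-mabi=ieeelongdouble), FLOAT128_IEEE_P (TFmode) holds and the
remapping above turns, say, CODE_FOR_sqrtkf2_odd into
CODE_FOR_sqrttf2_odd, so a single built-in definition serves both
long-double formats. */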
15950 if (TARGET_DEBUG_BUILTIN)
15952 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15953 const char *name2 = (icode != CODE_FOR_nothing)
15954 ? get_insn_name ((int) icode)
15955 : "nothing";
15956 const char *name3;
15958 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15960 default: name3 = "unknown"; break;
15961 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15962 case RS6000_BTC_UNARY: name3 = "unary"; break;
15963 case RS6000_BTC_BINARY: name3 = "binary"; break;
15964 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15965 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15966 case RS6000_BTC_ABS: name3 = "abs"; break;
15967 case RS6000_BTC_DST: name3 = "dst"; break;
15971 fprintf (stderr,
15972 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15973 (name1) ? name1 : "---", fcode,
15974 (name2) ? name2 : "---", (int) icode,
15975 name3,
15976 func_valid_p ? "" : ", not valid");
15979 if (!func_valid_p)
15981 rs6000_invalid_builtin (fcode);
15983 /* Given it is invalid, just generate a normal call. */
15984 return expand_call (exp, target, ignore);
15987 switch (fcode)
15989 case RS6000_BUILTIN_RECIP:
15990 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15992 case RS6000_BUILTIN_RECIPF:
15993 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15995 case RS6000_BUILTIN_RSQRTF:
15996 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15998 case RS6000_BUILTIN_RSQRT:
15999 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16001 case POWER7_BUILTIN_BPERMD:
16002 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16003 ? CODE_FOR_bpermd_di
16004 : CODE_FOR_bpermd_si), exp, target);
16006 case RS6000_BUILTIN_GET_TB:
16007 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16008 target);
16010 case RS6000_BUILTIN_MFTB:
16011 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16012 ? CODE_FOR_rs6000_mftb_di
16013 : CODE_FOR_rs6000_mftb_si),
16014 target);
16016 case RS6000_BUILTIN_MFFS:
16017 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16019 case RS6000_BUILTIN_MTFSB0:
16020 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16022 case RS6000_BUILTIN_MTFSB1:
16023 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16025 case RS6000_BUILTIN_SET_FPSCR_RN:
16026 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16027 exp);
16029 case RS6000_BUILTIN_SET_FPSCR_DRN:
16030 return
16031 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16032 exp);
16034 case RS6000_BUILTIN_MFFSL:
16035 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16037 case RS6000_BUILTIN_MTFSF:
16038 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16040 case RS6000_BUILTIN_CPU_INIT:
16041 case RS6000_BUILTIN_CPU_IS:
16042 case RS6000_BUILTIN_CPU_SUPPORTS:
16043 return cpu_expand_builtin (fcode, exp, target);
16045 case MISC_BUILTIN_SPEC_BARRIER:
16047 emit_insn (gen_speculation_barrier ());
16048 return NULL_RTX;
16051 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16052 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16054 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16055 : (int) CODE_FOR_altivec_lvsl_direct);
16056 machine_mode tmode = insn_data[icode2].operand[0].mode;
16057 machine_mode mode = insn_data[icode2].operand[1].mode;
16058 tree arg;
16059 rtx op, addr, pat;
16061 gcc_assert (TARGET_ALTIVEC);
16063 arg = CALL_EXPR_ARG (exp, 0);
16064 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16065 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16066 addr = memory_address (mode, op);
16067 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16068 op = addr;
16069 else
16071 /* For the load case we need to negate the address. */
16072 op = gen_reg_rtx (GET_MODE (addr));
16073 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
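/* Annotation (not part of the original source): negating the address
   here means the lvsr (big-endian) or lvsl (little-endian) pattern
   selected above produces the permute control vector used to realign an
   unaligned vector load; the store case passes the address through
   unmodified.  */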
16075 op = gen_rtx_MEM (mode, op);
16077 if (target == 0
16078 || GET_MODE (target) != tmode
16079 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16080 target = gen_reg_rtx (tmode);
16082 pat = GEN_FCN (icode2) (target, op);
16083 if (!pat)
16084 return 0;
16085 emit_insn (pat);
16087 return target;
16090 case ALTIVEC_BUILTIN_VCFUX:
16091 case ALTIVEC_BUILTIN_VCFSX:
16092 case ALTIVEC_BUILTIN_VCTUXS:
16093 case ALTIVEC_BUILTIN_VCTSXS:
16094 /* FIXME: There's got to be a nicer way to handle this case than
16095 constructing a new CALL_EXPR. */
16096 if (call_expr_nargs (exp) == 1)
16098 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16099 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16101 break;
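/* Annotation (not part of the original source): the rewrite above
   handles the one-argument form of these conversion builtins by
   appending an implicit zero scale factor (integer_zero_node), so the
   expanders below always see the two-operand form.  */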
16103 /* For the pack and unpack int128 routines, fix up the builtin so it
16104 uses the correct IBM128 type. */
16105 case MISC_BUILTIN_PACK_IF:
16106 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16108 icode = CODE_FOR_packtf;
16109 fcode = MISC_BUILTIN_PACK_TF;
16110 uns_fcode = (size_t)fcode;
16112 break;
16114 case MISC_BUILTIN_UNPACK_IF:
16115 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16117 icode = CODE_FOR_unpacktf;
16118 fcode = MISC_BUILTIN_UNPACK_TF;
16119 uns_fcode = (size_t)fcode;
16121 break;
16123 default:
16124 break;
16127 if (TARGET_ALTIVEC)
16129 ret = altivec_expand_builtin (exp, target, &success);
16131 if (success)
16132 return ret;
16134 if (TARGET_HTM)
16136 ret = htm_expand_builtin (exp, target, &success);
16138 if (success)
16139 return ret;
16142 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16143 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16144 gcc_assert (attr == RS6000_BTC_UNARY
16145 || attr == RS6000_BTC_BINARY
16146 || attr == RS6000_BTC_TERNARY
16147 || attr == RS6000_BTC_SPECIAL);
16149 /* Handle simple unary operations. */
16150 d = bdesc_1arg;
16151 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16152 if (d->code == fcode)
16153 return rs6000_expand_unop_builtin (icode, exp, target);
16155 /* Handle simple binary operations. */
16156 d = bdesc_2arg;
16157 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16158 if (d->code == fcode)
16159 return rs6000_expand_binop_builtin (icode, exp, target);
16161 /* Handle simple ternary operations. */
16162 d = bdesc_3arg;
16163 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16164 if (d->code == fcode)
16165 return rs6000_expand_ternop_builtin (icode, exp, target);
16167 /* Handle simple no-argument operations. */
16168 d = bdesc_0arg;
16169 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16170 if (d->code == fcode)
16171 return rs6000_expand_zeroop_builtin (icode, target);
16173 gcc_unreachable ();
16176 /* Create a builtin vector type with a name, taking care not to give
16177    the canonical type a name.  */
16179 static tree
16180 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16182 tree result = build_vector_type (elt_type, num_elts);
16184 /* Copy so we don't give the canonical type a name. */
16185 result = build_variant_type_copy (result);
16187 add_builtin_type (name, result);
16189 return result;
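/* Annotation (illustrative, not part of the original source): the named
   variant built here is what user-level declarations resolve to, e.g.

	__vector signed int v = { 1, 2, 3, 4 };

   picks up the "__vector signed int" copy registered below, while the
   canonical V4SI vector type itself stays nameless.  */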
16192 static void
16193 rs6000_init_builtins (void)
16195 tree tdecl;
16196 tree ftype;
16197 machine_mode mode;
16199 if (TARGET_DEBUG_BUILTIN)
16200 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16201 (TARGET_ALTIVEC) ? ", altivec" : "",
16202 (TARGET_VSX) ? ", vsx" : "");
16204 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16205 : "__vector long long",
16206 intDI_type_node, 2);
16207 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16208 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16209 intSI_type_node, 4);
16210 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16211 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16212 intHI_type_node, 8);
16213 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16214 intQI_type_node, 16);
16216 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16217 unsigned_intQI_type_node, 16);
16218 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16219 unsigned_intHI_type_node, 8);
16220 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16221 unsigned_intSI_type_node, 4);
16222 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16223 ? "__vector unsigned long"
16224 : "__vector unsigned long long",
16225 unsigned_intDI_type_node, 2);
16227 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16229 const_str_type_node
16230 = build_pointer_type (build_qualified_type (char_type_node,
16231 TYPE_QUAL_CONST));
16233 /* We use V1TI mode as a special container to hold __int128_t items that
16234 must live in VSX registers. */
16235 if (intTI_type_node)
16237 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16238 intTI_type_node, 1);
16239 unsigned_V1TI_type_node
16240 = rs6000_vector_type ("__vector unsigned __int128",
16241 unsigned_intTI_type_node, 1);
16244 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16245 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16246 'vector unsigned short'. */
16248 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16249 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16250 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16251 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16252 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16254 long_integer_type_internal_node = long_integer_type_node;
16255 long_unsigned_type_internal_node = long_unsigned_type_node;
16256 long_long_integer_type_internal_node = long_long_integer_type_node;
16257 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16258 intQI_type_internal_node = intQI_type_node;
16259 uintQI_type_internal_node = unsigned_intQI_type_node;
16260 intHI_type_internal_node = intHI_type_node;
16261 uintHI_type_internal_node = unsigned_intHI_type_node;
16262 intSI_type_internal_node = intSI_type_node;
16263 uintSI_type_internal_node = unsigned_intSI_type_node;
16264 intDI_type_internal_node = intDI_type_node;
16265 uintDI_type_internal_node = unsigned_intDI_type_node;
16266 intTI_type_internal_node = intTI_type_node;
16267 uintTI_type_internal_node = unsigned_intTI_type_node;
16268 float_type_internal_node = float_type_node;
16269 double_type_internal_node = double_type_node;
16270 long_double_type_internal_node = long_double_type_node;
16271 dfloat64_type_internal_node = dfloat64_type_node;
16272 dfloat128_type_internal_node = dfloat128_type_node;
16273 void_type_internal_node = void_type_node;
16275 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16276 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16277 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16278 format that uses a pair of doubles, depending on the switches and
16279 defaults.
16281 If we don't have support for either 128-bit IBM double-double or IEEE
16282 128-bit floating point, we need to make sure the type is non-zero or
16283 else the self-test fails during bootstrap.
16285 Always create __ibm128 as a separate type, even if the current long double
16286 format is IBM extended double.
16288 For IEEE 128-bit floating point, always create the type __ieee128. If the
16289 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16290 __ieee128. */
16291 if (TARGET_FLOAT128_TYPE)
16293 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16294 ibm128_float_type_node = long_double_type_node;
16295 else
16297 ibm128_float_type_node = make_node (REAL_TYPE);
16298 TYPE_PRECISION (ibm128_float_type_node) = 128;
16299 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16300 layout_type (ibm128_float_type_node);
16303 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16304 "__ibm128");
16306 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16307 ieee128_float_type_node = long_double_type_node;
16308 else
16309 ieee128_float_type_node = float128_type_node;
16311 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16312 "__ieee128");
16315 else
16316 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16318 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16319 a tree type node. */
16320 builtin_mode_to_type[QImode][0] = integer_type_node;
16321 builtin_mode_to_type[HImode][0] = integer_type_node;
16322 builtin_mode_to_type[SImode][0] = intSI_type_node;
16323 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16324 builtin_mode_to_type[DImode][0] = intDI_type_node;
16325 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16326 builtin_mode_to_type[TImode][0] = intTI_type_node;
16327 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16328 builtin_mode_to_type[SFmode][0] = float_type_node;
16329 builtin_mode_to_type[DFmode][0] = double_type_node;
16330 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16331 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16332 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16333 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16334 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16335 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16336 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16337 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16338 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16339 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16340 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16341 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16342 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16343 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16344 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16345 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16346 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
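/* Annotation (illustrative sketch, not part of the original source): the
   table is consulted with a (machine mode, signedness) pair and falls
   back to the signed entry when no unsigned type is registered, exactly
   as builtin_function_type does further below.  "example_mode_to_type"
   is a hypothetical helper shown only for exposition.  */
#if 0
static tree
example_mode_to_type (machine_mode mode, int uns_p)
{
  tree type = builtin_mode_to_type[mode][uns_p];
  if (!type && uns_p)
    type = builtin_mode_to_type[mode][0];	/* fall back to signed */
  return type;
}
#endif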
16348 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16349 TYPE_NAME (bool_char_type_node) = tdecl;
16351 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16352 TYPE_NAME (bool_short_type_node) = tdecl;
16354 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16355 TYPE_NAME (bool_int_type_node) = tdecl;
16357 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16358 TYPE_NAME (pixel_type_node) = tdecl;
16360 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16361 bool_char_type_node, 16);
16362 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16363 bool_short_type_node, 8);
16364 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16365 bool_int_type_node, 4);
16366 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16367 ? "__vector __bool long"
16368 : "__vector __bool long long",
16369 bool_long_long_type_node, 2);
16370 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16371 pixel_type_node, 8);
16373 /* Create Altivec and VSX builtins on machines with at least the
16374 general purpose extensions (970 and newer) to allow the use of
16375 the target attribute. */
16376 if (TARGET_EXTRA_BUILTINS)
16377 altivec_init_builtins ();
16378 if (TARGET_HTM)
16379 htm_init_builtins ();
16381 if (TARGET_EXTRA_BUILTINS)
16382 rs6000_common_init_builtins ();
16384 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16385 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16386 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16388 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16389 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16390 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16392 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16393 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16394 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16396 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16397 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16398 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
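/* Illustrative usage (annotation, not part of the original source): these
   four builtins are dispatched to the reciprocal-estimate expanders in
   the fcode switch of rs6000_expand_builtin above.  A sketch:  */
#if 0
double q = __builtin_recipdiv (x, y);	/* approximates x / y */
float  r = __builtin_rsqrtf (f);	/* approximates 1.0f / sqrtf (f) */
#endif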
16400 mode = (TARGET_64BIT) ? DImode : SImode;
16401 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16402 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16403 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16405 ftype = build_function_type_list (unsigned_intDI_type_node,
16406 NULL_TREE);
16407 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16409 if (TARGET_64BIT)
16410 ftype = build_function_type_list (unsigned_intDI_type_node,
16411 NULL_TREE);
16412 else
16413 ftype = build_function_type_list (unsigned_intSI_type_node,
16414 NULL_TREE);
16415 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
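/* Illustrative usage (annotation, not part of the original source),
   matching the two function types built above:  */
#if 0
unsigned long long tb = __builtin_ppc_get_timebase ();	/* full 64 bits */
unsigned long lo = __builtin_ppc_mftb ();  /* 64 bits on 64-bit targets,
					      low 32 bits on 32-bit ones */
#endif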
16417 ftype = build_function_type_list (double_type_node, NULL_TREE);
16418 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16420 ftype = build_function_type_list (double_type_node, NULL_TREE);
16421 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16423 ftype = build_function_type_list (void_type_node,
16424 intSI_type_node,
16425 NULL_TREE);
16426 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16428 ftype = build_function_type_list (void_type_node,
16429 intSI_type_node,
16430 NULL_TREE);
16431 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16433 ftype = build_function_type_list (void_type_node,
16434 intDI_type_node,
16435 NULL_TREE);
16436 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16438 ftype = build_function_type_list (void_type_node,
16439 intDI_type_node,
16440 NULL_TREE);
16441 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16443 ftype = build_function_type_list (void_type_node,
16444 intSI_type_node, double_type_node,
16445 NULL_TREE);
16446 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
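/* Illustrative usage (annotation, not part of the original source) of the
   FPSCR builtins typed above; the 0xff field mask and RN value 0 are
   example arguments (all eight FPSCR fields, round-to-nearest):  */
#if 0
double fpscr = __builtin_mffs ();	/* read the FPSCR as a double */
__builtin_mtfsf (0xff, fpscr);		/* write all eight fields back */
__builtin_set_fpscr_rn (0);		/* set round-to-nearest */
#endif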
16448 ftype = build_function_type_list (void_type_node, NULL_TREE);
16449 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16450 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16451 MISC_BUILTIN_SPEC_BARRIER);
16453 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16454 NULL_TREE);
16455 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16456 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
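/* Illustrative usage (annotation, not part of the original source); the
   strings are matched against platform data by cpu_expand_builtin, and
   use_power9_path is a hypothetical function:  */
#if 0
__builtin_cpu_init ();
if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("arch_3_00"))
  use_power9_path ();
#endif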
16458 /* AIX libm provides clog as __clog. */
16459 if (TARGET_XCOFF &&
16460 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16461 set_user_assembler_name (tdecl, "__clog");
16463 #ifdef SUBTARGET_INIT_BUILTINS
16464 SUBTARGET_INIT_BUILTINS;
16465 #endif
16468 /* Returns the rs6000 builtin decl for CODE. */
16470 static tree
16471 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16473 HOST_WIDE_INT fnmask;
16475 if (code >= RS6000_BUILTIN_COUNT)
16476 return error_mark_node;
16478 fnmask = rs6000_builtin_info[code].mask;
16479 if ((fnmask & rs6000_builtin_mask) != fnmask)
16481 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16482 return error_mark_node;
16485 return rs6000_builtin_decls[code];
16488 static void
16489 altivec_init_builtins (void)
16491 const struct builtin_description *d;
16492 size_t i;
16493 tree ftype;
16494 tree decl;
16495 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16497 tree pvoid_type_node = build_pointer_type (void_type_node);
16499 tree pcvoid_type_node
16500 = build_pointer_type (build_qualified_type (void_type_node,
16501 TYPE_QUAL_CONST));
16503 tree int_ftype_opaque
16504 = build_function_type_list (integer_type_node,
16505 opaque_V4SI_type_node, NULL_TREE);
16506 tree opaque_ftype_opaque
16507 = build_function_type_list (integer_type_node, NULL_TREE);
16508 tree opaque_ftype_opaque_int
16509 = build_function_type_list (opaque_V4SI_type_node,
16510 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16511 tree opaque_ftype_opaque_opaque_int
16512 = build_function_type_list (opaque_V4SI_type_node,
16513 opaque_V4SI_type_node, opaque_V4SI_type_node,
16514 integer_type_node, NULL_TREE);
16515 tree opaque_ftype_opaque_opaque_opaque
16516 = build_function_type_list (opaque_V4SI_type_node,
16517 opaque_V4SI_type_node, opaque_V4SI_type_node,
16518 opaque_V4SI_type_node, NULL_TREE);
16519 tree opaque_ftype_opaque_opaque
16520 = build_function_type_list (opaque_V4SI_type_node,
16521 opaque_V4SI_type_node, opaque_V4SI_type_node,
16522 NULL_TREE);
16523 tree int_ftype_int_opaque_opaque
16524 = build_function_type_list (integer_type_node,
16525 integer_type_node, opaque_V4SI_type_node,
16526 opaque_V4SI_type_node, NULL_TREE);
16527 tree int_ftype_int_v4si_v4si
16528 = build_function_type_list (integer_type_node,
16529 integer_type_node, V4SI_type_node,
16530 V4SI_type_node, NULL_TREE);
16531 tree int_ftype_int_v2di_v2di
16532 = build_function_type_list (integer_type_node,
16533 integer_type_node, V2DI_type_node,
16534 V2DI_type_node, NULL_TREE);
16535 tree void_ftype_v4si
16536 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16537 tree v8hi_ftype_void
16538 = build_function_type_list (V8HI_type_node, NULL_TREE);
16539 tree void_ftype_void
16540 = build_function_type_list (void_type_node, NULL_TREE);
16541 tree void_ftype_int
16542 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16544 tree opaque_ftype_long_pcvoid
16545 = build_function_type_list (opaque_V4SI_type_node,
16546 long_integer_type_node, pcvoid_type_node,
16547 NULL_TREE);
16548 tree v16qi_ftype_long_pcvoid
16549 = build_function_type_list (V16QI_type_node,
16550 long_integer_type_node, pcvoid_type_node,
16551 NULL_TREE);
16552 tree v8hi_ftype_long_pcvoid
16553 = build_function_type_list (V8HI_type_node,
16554 long_integer_type_node, pcvoid_type_node,
16555 NULL_TREE);
16556 tree v4si_ftype_long_pcvoid
16557 = build_function_type_list (V4SI_type_node,
16558 long_integer_type_node, pcvoid_type_node,
16559 NULL_TREE);
16560 tree v4sf_ftype_long_pcvoid
16561 = build_function_type_list (V4SF_type_node,
16562 long_integer_type_node, pcvoid_type_node,
16563 NULL_TREE);
16564 tree v2df_ftype_long_pcvoid
16565 = build_function_type_list (V2DF_type_node,
16566 long_integer_type_node, pcvoid_type_node,
16567 NULL_TREE);
16568 tree v2di_ftype_long_pcvoid
16569 = build_function_type_list (V2DI_type_node,
16570 long_integer_type_node, pcvoid_type_node,
16571 NULL_TREE);
16572 tree v1ti_ftype_long_pcvoid
16573 = build_function_type_list (V1TI_type_node,
16574 long_integer_type_node, pcvoid_type_node,
16575 NULL_TREE);
16577 tree void_ftype_opaque_long_pvoid
16578 = build_function_type_list (void_type_node,
16579 opaque_V4SI_type_node, long_integer_type_node,
16580 pvoid_type_node, NULL_TREE);
16581 tree void_ftype_v4si_long_pvoid
16582 = build_function_type_list (void_type_node,
16583 V4SI_type_node, long_integer_type_node,
16584 pvoid_type_node, NULL_TREE);
16585 tree void_ftype_v16qi_long_pvoid
16586 = build_function_type_list (void_type_node,
16587 V16QI_type_node, long_integer_type_node,
16588 pvoid_type_node, NULL_TREE);
16590 tree void_ftype_v16qi_pvoid_long
16591 = build_function_type_list (void_type_node,
16592 V16QI_type_node, pvoid_type_node,
16593 long_integer_type_node, NULL_TREE);
16595 tree void_ftype_v8hi_long_pvoid
16596 = build_function_type_list (void_type_node,
16597 V8HI_type_node, long_integer_type_node,
16598 pvoid_type_node, NULL_TREE);
16599 tree void_ftype_v4sf_long_pvoid
16600 = build_function_type_list (void_type_node,
16601 V4SF_type_node, long_integer_type_node,
16602 pvoid_type_node, NULL_TREE);
16603 tree void_ftype_v2df_long_pvoid
16604 = build_function_type_list (void_type_node,
16605 V2DF_type_node, long_integer_type_node,
16606 pvoid_type_node, NULL_TREE);
16607 tree void_ftype_v1ti_long_pvoid
16608 = build_function_type_list (void_type_node,
16609 V1TI_type_node, long_integer_type_node,
16610 pvoid_type_node, NULL_TREE);
16611 tree void_ftype_v2di_long_pvoid
16612 = build_function_type_list (void_type_node,
16613 V2DI_type_node, long_integer_type_node,
16614 pvoid_type_node, NULL_TREE);
16615 tree int_ftype_int_v8hi_v8hi
16616 = build_function_type_list (integer_type_node,
16617 integer_type_node, V8HI_type_node,
16618 V8HI_type_node, NULL_TREE);
16619 tree int_ftype_int_v16qi_v16qi
16620 = build_function_type_list (integer_type_node,
16621 integer_type_node, V16QI_type_node,
16622 V16QI_type_node, NULL_TREE);
16623 tree int_ftype_int_v4sf_v4sf
16624 = build_function_type_list (integer_type_node,
16625 integer_type_node, V4SF_type_node,
16626 V4SF_type_node, NULL_TREE);
16627 tree int_ftype_int_v2df_v2df
16628 = build_function_type_list (integer_type_node,
16629 integer_type_node, V2DF_type_node,
16630 V2DF_type_node, NULL_TREE);
16631 tree v2di_ftype_v2di
16632 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16633 tree v4si_ftype_v4si
16634 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16635 tree v8hi_ftype_v8hi
16636 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16637 tree v16qi_ftype_v16qi
16638 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16639 tree v4sf_ftype_v4sf
16640 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16641 tree v2df_ftype_v2df
16642 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16643 tree void_ftype_pcvoid_int_int
16644 = build_function_type_list (void_type_node,
16645 pcvoid_type_node, integer_type_node,
16646 integer_type_node, NULL_TREE);
16648 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16649 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16650 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16651 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16652 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16653 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16654 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16655 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16656 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16657 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16658 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16659 ALTIVEC_BUILTIN_LVXL_V2DF);
16660 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16661 ALTIVEC_BUILTIN_LVXL_V2DI);
16662 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16663 ALTIVEC_BUILTIN_LVXL_V4SF);
16664 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16665 ALTIVEC_BUILTIN_LVXL_V4SI);
16666 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16667 ALTIVEC_BUILTIN_LVXL_V8HI);
16668 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16669 ALTIVEC_BUILTIN_LVXL_V16QI);
16670 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16671 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16672 ALTIVEC_BUILTIN_LVX_V1TI);
16673 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16674 ALTIVEC_BUILTIN_LVX_V2DF);
16675 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16676 ALTIVEC_BUILTIN_LVX_V2DI);
16677 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16678 ALTIVEC_BUILTIN_LVX_V4SF);
16679 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16680 ALTIVEC_BUILTIN_LVX_V4SI);
16681 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16682 ALTIVEC_BUILTIN_LVX_V8HI);
16683 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16684 ALTIVEC_BUILTIN_LVX_V16QI);
16685 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16686 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16687 ALTIVEC_BUILTIN_STVX_V2DF);
16688 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16689 ALTIVEC_BUILTIN_STVX_V2DI);
16690 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16691 ALTIVEC_BUILTIN_STVX_V4SF);
16692 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16693 ALTIVEC_BUILTIN_STVX_V4SI);
16694 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16695 ALTIVEC_BUILTIN_STVX_V8HI);
16696 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16697 ALTIVEC_BUILTIN_STVX_V16QI);
16698 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16699 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16700 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16701 ALTIVEC_BUILTIN_STVXL_V2DF);
16702 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16703 ALTIVEC_BUILTIN_STVXL_V2DI);
16704 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16705 ALTIVEC_BUILTIN_STVXL_V4SF);
16706 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16707 ALTIVEC_BUILTIN_STVXL_V4SI);
16708 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16709 ALTIVEC_BUILTIN_STVXL_V8HI);
16710 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16711 ALTIVEC_BUILTIN_STVXL_V16QI);
16712 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16713 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16714 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16715 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16716 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16717 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16718 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16719 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16720 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16721 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16722 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16723 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16724 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16725 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16726 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16727 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16729 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16730 VSX_BUILTIN_LXVD2X_V2DF);
16731 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16732 VSX_BUILTIN_LXVD2X_V2DI);
16733 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16734 VSX_BUILTIN_LXVW4X_V4SF);
16735 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16736 VSX_BUILTIN_LXVW4X_V4SI);
16737 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16738 VSX_BUILTIN_LXVW4X_V8HI);
16739 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16740 VSX_BUILTIN_LXVW4X_V16QI);
16741 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16742 VSX_BUILTIN_STXVD2X_V2DF);
16743 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16744 VSX_BUILTIN_STXVD2X_V2DI);
16745 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16746 VSX_BUILTIN_STXVW4X_V4SF);
16747 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16748 VSX_BUILTIN_STXVW4X_V4SI);
16749 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16750 VSX_BUILTIN_STXVW4X_V8HI);
16751 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16752 VSX_BUILTIN_STXVW4X_V16QI);
16754 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16755 VSX_BUILTIN_LD_ELEMREV_V2DF);
16756 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16757 VSX_BUILTIN_LD_ELEMREV_V2DI);
16758 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16759 VSX_BUILTIN_LD_ELEMREV_V4SF);
16760 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16761 VSX_BUILTIN_LD_ELEMREV_V4SI);
16762 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16763 VSX_BUILTIN_LD_ELEMREV_V8HI);
16764 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16765 VSX_BUILTIN_LD_ELEMREV_V16QI);
16766 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16767 VSX_BUILTIN_ST_ELEMREV_V2DF);
16768 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16769 VSX_BUILTIN_ST_ELEMREV_V1TI);
16770 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16771 VSX_BUILTIN_ST_ELEMREV_V2DI);
16772 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16773 VSX_BUILTIN_ST_ELEMREV_V4SF);
16774 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16775 VSX_BUILTIN_ST_ELEMREV_V4SI);
16776 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16777 VSX_BUILTIN_ST_ELEMREV_V8HI);
16778 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16779 VSX_BUILTIN_ST_ELEMREV_V16QI);
16781 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16782 VSX_BUILTIN_VEC_LD);
16783 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16784 VSX_BUILTIN_VEC_ST);
16785 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16786 VSX_BUILTIN_VEC_XL);
16787 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16788 VSX_BUILTIN_VEC_XL_BE);
16789 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16790 VSX_BUILTIN_VEC_XST);
16791 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16792 VSX_BUILTIN_VEC_XST_BE);
16794 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16795 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16796 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16798 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16799 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16800 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16801 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16802 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16803 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16804 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16805 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16806 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16807 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16808 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16809 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16811 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16812 ALTIVEC_BUILTIN_VEC_ADDE);
16813 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16814 ALTIVEC_BUILTIN_VEC_ADDEC);
16815 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16816 ALTIVEC_BUILTIN_VEC_CMPNE);
16817 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16818 ALTIVEC_BUILTIN_VEC_MUL);
16819 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16820 ALTIVEC_BUILTIN_VEC_SUBE);
16821 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16822 ALTIVEC_BUILTIN_VEC_SUBEC);
16824 /* Cell builtins. */
16825 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16826 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16827 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16828 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16830 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16831 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16832 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16833 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16835 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16836 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16837 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16838 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16840 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16841 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16842 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16843 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16845 if (TARGET_P9_VECTOR)
16847 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16848 P9V_BUILTIN_STXVL);
16849 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16850 P9V_BUILTIN_XST_LEN_R);
16853 /* Add the DST variants. */
16854 d = bdesc_dst;
16855 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16857 HOST_WIDE_INT mask = d->mask;
16859 /* It is expected that these dst built-in functions may have
16860 d->icode equal to CODE_FOR_nothing. */
16861 if ((mask & builtin_mask) != mask)
16863 if (TARGET_DEBUG_BUILTIN)
16864 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16865 d->name);
16866 continue;
16868 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
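/* Annotation (not part of the original source): the test
   (mask & builtin_mask) != mask used in this and the following loops
   requires every feature bit a descriptor demands to be enabled; if any
   bit of d->mask is missing from builtin_mask, the AND clears it and the
   builtin is skipped.  */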
16871 /* Initialize the predicates. */
16872 d = bdesc_altivec_preds;
16873 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16875 machine_mode mode1;
16876 tree type;
16877 HOST_WIDE_INT mask = d->mask;
16879 if ((mask & builtin_mask) != mask)
16881 if (TARGET_DEBUG_BUILTIN)
16882 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16883 d->name);
16884 continue;
16887 if (rs6000_overloaded_builtin_p (d->code))
16888 mode1 = VOIDmode;
16889 else
16891 /* Cannot define builtin if the instruction is disabled. */
16892 gcc_assert (d->icode != CODE_FOR_nothing);
16893 mode1 = insn_data[d->icode].operand[1].mode;
16896 switch (mode1)
16898 case E_VOIDmode:
16899 type = int_ftype_int_opaque_opaque;
16900 break;
16901 case E_V2DImode:
16902 type = int_ftype_int_v2di_v2di;
16903 break;
16904 case E_V4SImode:
16905 type = int_ftype_int_v4si_v4si;
16906 break;
16907 case E_V8HImode:
16908 type = int_ftype_int_v8hi_v8hi;
16909 break;
16910 case E_V16QImode:
16911 type = int_ftype_int_v16qi_v16qi;
16912 break;
16913 case E_V4SFmode:
16914 type = int_ftype_int_v4sf_v4sf;
16915 break;
16916 case E_V2DFmode:
16917 type = int_ftype_int_v2df_v2df;
16918 break;
16919 default:
16920 gcc_unreachable ();
16923 def_builtin (d->name, type, d->code);
16926 /* Initialize the abs* operators. */
16927 d = bdesc_abs;
16928 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16930 machine_mode mode0;
16931 tree type;
16932 HOST_WIDE_INT mask = d->mask;
16934 if ((mask & builtin_mask) != mask)
16936 if (TARGET_DEBUG_BUILTIN)
16937 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
16938 d->name);
16939 continue;
16942 /* Cannot define builtin if the instruction is disabled. */
16943 gcc_assert (d->icode != CODE_FOR_nothing);
16944 mode0 = insn_data[d->icode].operand[0].mode;
16946 switch (mode0)
16948 case E_V2DImode:
16949 type = v2di_ftype_v2di;
16950 break;
16951 case E_V4SImode:
16952 type = v4si_ftype_v4si;
16953 break;
16954 case E_V8HImode:
16955 type = v8hi_ftype_v8hi;
16956 break;
16957 case E_V16QImode:
16958 type = v16qi_ftype_v16qi;
16959 break;
16960 case E_V4SFmode:
16961 type = v4sf_ftype_v4sf;
16962 break;
16963 case E_V2DFmode:
16964 type = v2df_ftype_v2df;
16965 break;
16966 default:
16967 gcc_unreachable ();
16970 def_builtin (d->name, type, d->code);
16973 /* Initialize the target builtin that implements
16974 targetm.vectorize.builtin_mask_for_load. */
16976 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16977 v16qi_ftype_long_pcvoid,
16978 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16979 BUILT_IN_MD, NULL, NULL_TREE);
16980 TREE_READONLY (decl) = 1;
16981 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16982 altivec_builtin_mask_for_load = decl;
16984 /* Access to the vec_init patterns. */
16985 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16986 integer_type_node, integer_type_node,
16987 integer_type_node, NULL_TREE);
16988 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16990 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16991 short_integer_type_node,
16992 short_integer_type_node,
16993 short_integer_type_node,
16994 short_integer_type_node,
16995 short_integer_type_node,
16996 short_integer_type_node,
16997 short_integer_type_node, NULL_TREE);
16998 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17000 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17001 char_type_node, char_type_node,
17002 char_type_node, char_type_node,
17003 char_type_node, char_type_node,
17004 char_type_node, char_type_node,
17005 char_type_node, char_type_node,
17006 char_type_node, char_type_node,
17007 char_type_node, char_type_node,
17008 char_type_node, NULL_TREE);
17009 def_builtin ("__builtin_vec_init_v16qi", ftype,
17010 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17012 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17013 float_type_node, float_type_node,
17014 float_type_node, NULL_TREE);
17015 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17017 /* VSX builtins. */
17018 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17019 double_type_node, NULL_TREE);
17020 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17022 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17023 intDI_type_node, NULL_TREE);
17024 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17026 /* Access to the vec_set patterns. */
17027 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17028 intSI_type_node,
17029 integer_type_node, NULL_TREE);
17030 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17032 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17033 intHI_type_node,
17034 integer_type_node, NULL_TREE);
17035 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17037 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17038 intQI_type_node,
17039 integer_type_node, NULL_TREE);
17040 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17042 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17043 float_type_node,
17044 integer_type_node, NULL_TREE);
17045 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17047 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17048 double_type_node,
17049 integer_type_node, NULL_TREE);
17050 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17052 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17053 intDI_type_node,
17054 integer_type_node, NULL_TREE);
17055 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17057 /* Access to the vec_extract patterns. */
17058 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17059 integer_type_node, NULL_TREE);
17060 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17062 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17063 integer_type_node, NULL_TREE);
17064 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17066 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17067 integer_type_node, NULL_TREE);
17068 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17070 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17071 integer_type_node, NULL_TREE);
17072 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17074 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17075 integer_type_node, NULL_TREE);
17076 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17078 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17079 integer_type_node, NULL_TREE);
17080 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
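/* Illustrative usage (annotation, not part of the original source),
   matching the vec_init/vec_set/vec_ext function types defined above:  */
#if 0
vector double vd = __builtin_vec_init_v2df (1.0, 2.0);
vd = __builtin_vec_set_v2df (vd, 3.0, 0);	/* replace element 0 */
double d = __builtin_vec_ext_v2df (vd, 1);	/* extract element 1 */
#endif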
17083 if (V1TI_type_node)
17085 tree v1ti_ftype_long_pcvoid
17086 = build_function_type_list (V1TI_type_node,
17087 long_integer_type_node, pcvoid_type_node,
17088 NULL_TREE);
17089 tree void_ftype_v1ti_long_pvoid
17090 = build_function_type_list (void_type_node,
17091 V1TI_type_node, long_integer_type_node,
17092 pvoid_type_node, NULL_TREE);
17093 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17094 VSX_BUILTIN_LD_ELEMREV_V1TI);
17095 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17096 VSX_BUILTIN_LXVD2X_V1TI);
17097 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17098 VSX_BUILTIN_STXVD2X_V1TI);
17099 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17100 NULL_TREE, NULL_TREE);
17101 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17102 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17103 intTI_type_node,
17104 integer_type_node, NULL_TREE);
17105 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17106 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17107 integer_type_node, NULL_TREE);
17108 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17113 static void
17114 htm_init_builtins (void)
17116 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17117 const struct builtin_description *d;
17118 size_t i;
17120 d = bdesc_htm;
17121 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17123 tree op[MAX_HTM_OPERANDS], type;
17124 HOST_WIDE_INT mask = d->mask;
17125 unsigned attr = rs6000_builtin_info[d->code].attr;
17126 bool void_func = (attr & RS6000_BTC_VOID);
17127 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17128 int nopnds = 0;
17129 tree gpr_type_node;
17130 tree rettype;
17131 tree argtype;
17133 /* It is expected that these htm built-in functions may have
17134 d->icode equal to CODE_FOR_nothing. */
17136 if (TARGET_32BIT && TARGET_POWERPC64)
17137 gpr_type_node = long_long_unsigned_type_node;
17138 else
17139 gpr_type_node = long_unsigned_type_node;
17141 if (attr & RS6000_BTC_SPR)
17143 rettype = gpr_type_node;
17144 argtype = gpr_type_node;
17146 else if (d->code == HTM_BUILTIN_TABORTDC
17147 || d->code == HTM_BUILTIN_TABORTDCI)
17149 rettype = unsigned_type_node;
17150 argtype = gpr_type_node;
17152 else
17154 rettype = unsigned_type_node;
17155 argtype = unsigned_type_node;
17158 if ((mask & builtin_mask) != mask)
17160 if (TARGET_DEBUG_BUILTIN)
17161 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17162 continue;
17165 if (d->name == 0)
17167 if (TARGET_DEBUG_BUILTIN)
17168 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17169 (long unsigned) i);
17170 continue;
17173 op[nopnds++] = (void_func) ? void_type_node : rettype;
17175 if (attr_args == RS6000_BTC_UNARY)
17176 op[nopnds++] = argtype;
17177 else if (attr_args == RS6000_BTC_BINARY)
17179 op[nopnds++] = argtype;
17180 op[nopnds++] = argtype;
17182 else if (attr_args == RS6000_BTC_TERNARY)
17184 op[nopnds++] = argtype;
17185 op[nopnds++] = argtype;
17186 op[nopnds++] = argtype;
17189 switch (nopnds)
17191 case 1:
17192 type = build_function_type_list (op[0], NULL_TREE);
17193 break;
17194 case 2:
17195 type = build_function_type_list (op[0], op[1], NULL_TREE);
17196 break;
17197 case 3:
17198 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17199 break;
17200 case 4:
17201 type = build_function_type_list (op[0], op[1], op[2], op[3],
17202 NULL_TREE);
17203 break;
17204 default:
17205 gcc_unreachable ();
17208 def_builtin (d->name, type, d->code);
17212 /* Hash function for builtin functions with up to 3 arguments and a return
17213 type. */
17214 hashval_t
17215 builtin_hasher::hash (builtin_hash_struct *bh)
17217 unsigned ret = 0;
17218 int i;
17220 for (i = 0; i < 4; i++)
17222 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17223 ret = (ret * 2) + bh->uns_p[i];
17226 return ret;
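/* Annotation (not part of the original source): with M = MAX_MACHINE_MODE
   the loop above is a mixed-radix encoding, equivalent to

	((((mode[0]*2 + uns_p[0])*M + mode[1])*2 + uns_p[1])*M + ...)

   so signatures differing in any mode or signedness slot hash to
   different values (up to unsigned wraparound).  */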
17229 /* Compare builtin hash entries H1 and H2 for equivalence. */
17230 bool
17231 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17233 return ((p1->mode[0] == p2->mode[0])
17234 && (p1->mode[1] == p2->mode[1])
17235 && (p1->mode[2] == p2->mode[2])
17236 && (p1->mode[3] == p2->mode[3])
17237 && (p1->uns_p[0] == p2->uns_p[0])
17238 && (p1->uns_p[1] == p2->uns_p[1])
17239 && (p1->uns_p[2] == p2->uns_p[2])
17240 && (p1->uns_p[3] == p2->uns_p[3]));
17243 /* Map types for builtin functions with an explicit return type and up to 3
17244 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17245 of the argument. */
17246 static tree
17247 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17248 machine_mode mode_arg1, machine_mode mode_arg2,
17249 enum rs6000_builtins builtin, const char *name)
17251 struct builtin_hash_struct h;
17252 struct builtin_hash_struct *h2;
17253 int num_args = 3;
17254 int i;
17255 tree ret_type = NULL_TREE;
17256 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17258 /* Create builtin_hash_table. */
17259 if (builtin_hash_table == NULL)
17260 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17262 h.type = NULL_TREE;
17263 h.mode[0] = mode_ret;
17264 h.mode[1] = mode_arg0;
17265 h.mode[2] = mode_arg1;
17266 h.mode[3] = mode_arg2;
17267 h.uns_p[0] = 0;
17268 h.uns_p[1] = 0;
17269 h.uns_p[2] = 0;
17270 h.uns_p[3] = 0;
17272 /* If the builtin function produces unsigned results or takes unsigned
17273 arguments, and it is returned as a decl for the vectorizer (such as
17274 widening multiplies, permute), make sure the arguments and return value
17275 are type correct. */
17276 switch (builtin)
17278 /* unsigned 1 argument functions. */
17279 case CRYPTO_BUILTIN_VSBOX:
17280 case CRYPTO_BUILTIN_VSBOX_BE:
17281 case P8V_BUILTIN_VGBBD:
17282 case MISC_BUILTIN_CDTBCD:
17283 case MISC_BUILTIN_CBCDTD:
17284 h.uns_p[0] = 1;
17285 h.uns_p[1] = 1;
17286 break;
17288 /* unsigned 2 argument functions. */
17289 case ALTIVEC_BUILTIN_VMULEUB:
17290 case ALTIVEC_BUILTIN_VMULEUH:
17291 case P8V_BUILTIN_VMULEUW:
17292 case ALTIVEC_BUILTIN_VMULOUB:
17293 case ALTIVEC_BUILTIN_VMULOUH:
17294 case P8V_BUILTIN_VMULOUW:
17295 case CRYPTO_BUILTIN_VCIPHER:
17296 case CRYPTO_BUILTIN_VCIPHER_BE:
17297 case CRYPTO_BUILTIN_VCIPHERLAST:
17298 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17299 case CRYPTO_BUILTIN_VNCIPHER:
17300 case CRYPTO_BUILTIN_VNCIPHER_BE:
17301 case CRYPTO_BUILTIN_VNCIPHERLAST:
17302 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17303 case CRYPTO_BUILTIN_VPMSUMB:
17304 case CRYPTO_BUILTIN_VPMSUMH:
17305 case CRYPTO_BUILTIN_VPMSUMW:
17306 case CRYPTO_BUILTIN_VPMSUMD:
17307 case CRYPTO_BUILTIN_VPMSUM:
17308 case MISC_BUILTIN_ADDG6S:
17309 case MISC_BUILTIN_DIVWEU:
17310 case MISC_BUILTIN_DIVDEU:
17311 case VSX_BUILTIN_UDIV_V2DI:
17312 case ALTIVEC_BUILTIN_VMAXUB:
17313 case ALTIVEC_BUILTIN_VMINUB:
17314 case ALTIVEC_BUILTIN_VMAXUH:
17315 case ALTIVEC_BUILTIN_VMINUH:
17316 case ALTIVEC_BUILTIN_VMAXUW:
17317 case ALTIVEC_BUILTIN_VMINUW:
17318 case P8V_BUILTIN_VMAXUD:
17319 case P8V_BUILTIN_VMINUD:
17320 h.uns_p[0] = 1;
17321 h.uns_p[1] = 1;
17322 h.uns_p[2] = 1;
17323 break;
17325 /* unsigned 3 argument functions. */
17326 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17327 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17328 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17329 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17330 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17331 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17332 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17333 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17334 case VSX_BUILTIN_VPERM_16QI_UNS:
17335 case VSX_BUILTIN_VPERM_8HI_UNS:
17336 case VSX_BUILTIN_VPERM_4SI_UNS:
17337 case VSX_BUILTIN_VPERM_2DI_UNS:
17338 case VSX_BUILTIN_XXSEL_16QI_UNS:
17339 case VSX_BUILTIN_XXSEL_8HI_UNS:
17340 case VSX_BUILTIN_XXSEL_4SI_UNS:
17341 case VSX_BUILTIN_XXSEL_2DI_UNS:
17342 case CRYPTO_BUILTIN_VPERMXOR:
17343 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17344 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17345 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17346 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17347 case CRYPTO_BUILTIN_VSHASIGMAW:
17348 case CRYPTO_BUILTIN_VSHASIGMAD:
17349 case CRYPTO_BUILTIN_VSHASIGMA:
17350 h.uns_p[0] = 1;
17351 h.uns_p[1] = 1;
17352 h.uns_p[2] = 1;
17353 h.uns_p[3] = 1;
17354 break;
17356 /* signed permute functions with unsigned char mask. */
17357 case ALTIVEC_BUILTIN_VPERM_16QI:
17358 case ALTIVEC_BUILTIN_VPERM_8HI:
17359 case ALTIVEC_BUILTIN_VPERM_4SI:
17360 case ALTIVEC_BUILTIN_VPERM_4SF:
17361 case ALTIVEC_BUILTIN_VPERM_2DI:
17362 case ALTIVEC_BUILTIN_VPERM_2DF:
17363 case VSX_BUILTIN_VPERM_16QI:
17364 case VSX_BUILTIN_VPERM_8HI:
17365 case VSX_BUILTIN_VPERM_4SI:
17366 case VSX_BUILTIN_VPERM_4SF:
17367 case VSX_BUILTIN_VPERM_2DI:
17368 case VSX_BUILTIN_VPERM_2DF:
17369 h.uns_p[3] = 1;
17370 break;
17372 /* unsigned args, signed return. */
17373 case VSX_BUILTIN_XVCVUXDSP:
17374 case VSX_BUILTIN_XVCVUXDDP_UNS:
17375 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17376 h.uns_p[1] = 1;
17377 break;
17379 /* signed args, unsigned return. */
17380 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17381 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17382 case MISC_BUILTIN_UNPACK_TD:
17383 case MISC_BUILTIN_UNPACK_V1TI:
17384 h.uns_p[0] = 1;
17385 break;
17387 /* unsigned arguments, bool return (compares). */
17388 case ALTIVEC_BUILTIN_VCMPEQUB:
17389 case ALTIVEC_BUILTIN_VCMPEQUH:
17390 case ALTIVEC_BUILTIN_VCMPEQUW:
17391 case P8V_BUILTIN_VCMPEQUD:
17392 case VSX_BUILTIN_CMPGE_U16QI:
17393 case VSX_BUILTIN_CMPGE_U8HI:
17394 case VSX_BUILTIN_CMPGE_U4SI:
17395 case VSX_BUILTIN_CMPGE_U2DI:
17396 case ALTIVEC_BUILTIN_VCMPGTUB:
17397 case ALTIVEC_BUILTIN_VCMPGTUH:
17398 case ALTIVEC_BUILTIN_VCMPGTUW:
17399 case P8V_BUILTIN_VCMPGTUD:
17400 h.uns_p[1] = 1;
17401 h.uns_p[2] = 1;
17402 break;
17404 /* unsigned arguments for 128-bit pack instructions. */
17405 case MISC_BUILTIN_PACK_TD:
17406 case MISC_BUILTIN_PACK_V1TI:
17407 h.uns_p[1] = 1;
17408 h.uns_p[2] = 1;
17409 break;
17411 /* unsigned second arguments (vector shift right). */
17412 case ALTIVEC_BUILTIN_VSRB:
17413 case ALTIVEC_BUILTIN_VSRH:
17414 case ALTIVEC_BUILTIN_VSRW:
17415 case P8V_BUILTIN_VSRD:
17416 h.uns_p[2] = 1;
17417 break;
17419 default:
17420 break;
17423 /* Figure out how many args are present. */
17424 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17425 num_args--;
17427 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17428 if (!ret_type && h.uns_p[0])
17429 ret_type = builtin_mode_to_type[h.mode[0]][0];
17431 if (!ret_type)
17432 fatal_error (input_location,
17433 "internal error: builtin function %qs had an unexpected "
17434 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17436 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17437 arg_type[i] = NULL_TREE;
17439 for (i = 0; i < num_args; i++)
17441 int m = (int) h.mode[i+1];
17442 int uns_p = h.uns_p[i+1];
17444 arg_type[i] = builtin_mode_to_type[m][uns_p];
17445 if (!arg_type[i] && uns_p)
17446 arg_type[i] = builtin_mode_to_type[m][0];
17448 if (!arg_type[i])
17449 fatal_error (input_location,
17450 "internal error: builtin function %qs, argument %d "
17451 "had unexpected argument type %qs", name, i,
17452 GET_MODE_NAME (m));
17455 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17456 if (*found == NULL)
17458 h2 = ggc_alloc<builtin_hash_struct> ();
17459 *h2 = h;
17460 *found = h2;
17462 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17463 arg_type[2], NULL_TREE);
17466 return (*found)->type;
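/* Annotation (illustrative, not part of the original source): because the
   hash table is keyed on the four (mode, signedness) pairs, repeated
   calls with an identical signature return the same cached FUNCTION_TYPE
   node instead of building a new one; e.g. the unsigned V4SI binary
   builtins such as VMAXUW and VMINUW above all share one type tree.  */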
17469 static void
17470 rs6000_common_init_builtins (void)
17472 const struct builtin_description *d;
17473 size_t i;
17475 tree opaque_ftype_opaque = NULL_TREE;
17476 tree opaque_ftype_opaque_opaque = NULL_TREE;
17477 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17478 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17480 /* Create Altivec and VSX builtins on machines with at least the
17481 general purpose extensions (970 and newer) to allow the use of
17482 the target attribute. */
17484 if (TARGET_EXTRA_BUILTINS)
17485 builtin_mask |= RS6000_BTM_COMMON;
17487 /* Add the ternary operators. */
17488 d = bdesc_3arg;
17489 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17491 tree type;
17492 HOST_WIDE_INT mask = d->mask;
17494 if ((mask & builtin_mask) != mask)
17496 if (TARGET_DEBUG_BUILTIN)
17497 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17498 continue;
17501 if (rs6000_overloaded_builtin_p (d->code))
17503 if (! (type = opaque_ftype_opaque_opaque_opaque))
17504 type = opaque_ftype_opaque_opaque_opaque
17505 = build_function_type_list (opaque_V4SI_type_node,
17506 opaque_V4SI_type_node,
17507 opaque_V4SI_type_node,
17508 opaque_V4SI_type_node,
17509 NULL_TREE);
17511 else
17513 enum insn_code icode = d->icode;
17514 if (d->name == 0)
17516 if (TARGET_DEBUG_BUILTIN)
17517 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17518 (long unsigned)i);
17520 continue;
17523 if (icode == CODE_FOR_nothing)
17525 if (TARGET_DEBUG_BUILTIN)
17526 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17527 d->name);
17529 continue;
17532 type = builtin_function_type (insn_data[icode].operand[0].mode,
17533 insn_data[icode].operand[1].mode,
17534 insn_data[icode].operand[2].mode,
17535 insn_data[icode].operand[3].mode,
17536 d->code, d->name);
17539 def_builtin (d->name, type, d->code);
17542 /* Add the binary operators. */
17543 d = bdesc_2arg;
17544 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17546 machine_mode mode0, mode1, mode2;
17547 tree type;
17548 HOST_WIDE_INT mask = d->mask;
17550 if ((mask & builtin_mask) != mask)
17552 if (TARGET_DEBUG_BUILTIN)
17553 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17554 continue;
17557 if (rs6000_overloaded_builtin_p (d->code))
17559 if (! (type = opaque_ftype_opaque_opaque))
17560 type = opaque_ftype_opaque_opaque
17561 = build_function_type_list (opaque_V4SI_type_node,
17562 opaque_V4SI_type_node,
17563 opaque_V4SI_type_node,
17564 NULL_TREE);
17566 else
17568 enum insn_code icode = d->icode;
17569 if (d->name == 0)
17571 if (TARGET_DEBUG_BUILTIN)
17572 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17573 (long unsigned)i);
17575 continue;
17578 if (icode == CODE_FOR_nothing)
17580 if (TARGET_DEBUG_BUILTIN)
17581 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17582 d->name);
17584 continue;
17587 mode0 = insn_data[icode].operand[0].mode;
17588 mode1 = insn_data[icode].operand[1].mode;
17589 mode2 = insn_data[icode].operand[2].mode;
17591 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17592 d->code, d->name);
17595 def_builtin (d->name, type, d->code);
17598 /* Add the simple unary operators. */
17599 d = bdesc_1arg;
17600 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17602 machine_mode mode0, mode1;
17603 tree type;
17604 HOST_WIDE_INT mask = d->mask;
17606 if ((mask & builtin_mask) != mask)
17608 if (TARGET_DEBUG_BUILTIN)
17609 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17610 continue;
17613 if (rs6000_overloaded_builtin_p (d->code))
17615 if (! (type = opaque_ftype_opaque))
17616 type = opaque_ftype_opaque
17617 = build_function_type_list (opaque_V4SI_type_node,
17618 opaque_V4SI_type_node,
17619 NULL_TREE);
17621 else
17623 enum insn_code icode = d->icode;
17624 if (d->name == 0)
17626 if (TARGET_DEBUG_BUILTIN)
17627 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17628 (long unsigned)i);
17630 continue;
17633 if (icode == CODE_FOR_nothing)
17635 if (TARGET_DEBUG_BUILTIN)
17636 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17637 d->name);
17639 continue;
17642 mode0 = insn_data[icode].operand[0].mode;
17643 mode1 = insn_data[icode].operand[1].mode;
17645 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17646 d->code, d->name);
17649 def_builtin (d->name, type, d->code);
17652 /* Add the simple no-argument operators. */
17653 d = bdesc_0arg;
17654 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17656 machine_mode mode0;
17657 tree type;
17658 HOST_WIDE_INT mask = d->mask;
17660 if ((mask & builtin_mask) != mask)
17662 if (TARGET_DEBUG_BUILTIN)
17663 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17664 continue;
17666 if (rs6000_overloaded_builtin_p (d->code))
17668 if (!opaque_ftype_opaque)
17669 opaque_ftype_opaque
17670 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17671 type = opaque_ftype_opaque;
17673 else
17675 enum insn_code icode = d->icode;
17676 if (d->name == 0)
17678 if (TARGET_DEBUG_BUILTIN)
17679 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17680 (long unsigned) i);
17681 continue;
17683 if (icode == CODE_FOR_nothing)
17685 if (TARGET_DEBUG_BUILTIN)
17686 fprintf (stderr,
17687 "rs6000_builtin, skip no-argument %s (no code)\n",
17688 d->name);
17689 continue;
17691 mode0 = insn_data[icode].operand[0].mode;
17692 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17693 d->code, d->name);
17695 def_builtin (d->name, type, d->code);
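/* Editor's note (illustrative, not part of the original source): every bdesc
   loop above gates a builtin on (mask & builtin_mask) == mask, i.e. all of
   the feature bits a builtin requires must be enabled.  A minimal sketch of
   the test, with hypothetical mask values:

     HOST_WIDE_INT builtin_mask = RS6000_BTM_ALTIVEC;
     HOST_WIDE_INT need = RS6000_BTM_ALTIVEC | RS6000_BTM_VSX;
     bool ok = (need & builtin_mask) == need;   -- false: VSX is missing

   so a builtin requiring both Altivec and VSX is skipped when only Altivec
   is available.  */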
17699 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17700 static void
17701 init_float128_ibm (machine_mode mode)
17703 if (!TARGET_XL_COMPAT)
17705 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17706 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17707 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17708 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17710 if (!TARGET_HARD_FLOAT)
17712 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17713 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17714 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17715 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17716 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17717 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17718 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17719 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17721 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17722 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17723 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17724 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17725 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17726 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17727 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17728 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17731 else
17733 set_optab_libfunc (add_optab, mode, "_xlqadd");
17734 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17735 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17736 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17739 /* Add various conversions for IFmode to use the traditional TFmode
17740 names. */
17741 if (mode == IFmode)
17743 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17744 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17745 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17746 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17747 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17748 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17750 if (TARGET_POWERPC64)
17752 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17753 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17754 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17755 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
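/* Editor's illustration (an assumption, not from the original source): given
   the mappings above, an IBM extended double addition such as

     long double f (long double a, long double b) { return a + b; }

   is compiled to a call to __gcc_qadd by default, and to _xlqadd under
   -mxl-compat.  */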
17760 /* Create a decl for either complex long double multiply or complex long double
17761 divide when long double is IEEE 128-bit floating point. We can't use
17762 __multc3 and __divtc3 because the original IBM extended double long
17763 double format used those names.  The complex multiply/divide functions are encoded
17764 as builtin functions with a complex result and 4 scalar inputs. */
17766 static void
17767 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17769 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17770 name, NULL_TREE);
17772 set_builtin_decl (fncode, fndecl, true);
17774 if (TARGET_DEBUG_BUILTIN)
17775 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17777 return;
17780 /* Set up IEEE 128-bit floating point routines. Use different names if the
17781 arguments can be passed in a vector register. The historical PowerPC
17782 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17783 continue to use that if we aren't using vector registers to pass IEEE
17784 128-bit floating point. */
17786 static void
17787 init_float128_ieee (machine_mode mode)
17789 if (FLOAT128_VECTOR_P (mode))
17791 static bool complex_muldiv_init_p = false;
17793 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17794 we have clone or target attributes, this will be called a second
17795 time. We want to create the built-in function only once. */
17796 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17798 complex_muldiv_init_p = true;
17799 built_in_function fncode_mul =
17800 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17801 - MIN_MODE_COMPLEX_FLOAT);
17802 built_in_function fncode_div =
17803 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17804 - MIN_MODE_COMPLEX_FLOAT);
17806 tree fntype = build_function_type_list (complex_long_double_type_node,
17807 long_double_type_node,
17808 long_double_type_node,
17809 long_double_type_node,
17810 long_double_type_node,
17811 NULL_TREE);
17813 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17814 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17817 set_optab_libfunc (add_optab, mode, "__addkf3");
17818 set_optab_libfunc (sub_optab, mode, "__subkf3");
17819 set_optab_libfunc (neg_optab, mode, "__negkf2");
17820 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17821 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17822 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17823 set_optab_libfunc (abs_optab, mode, "__abskf2");
17824 set_optab_libfunc (powi_optab, mode, "__powikf2");
17826 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17827 set_optab_libfunc (ne_optab, mode, "__nekf2");
17828 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17829 set_optab_libfunc (ge_optab, mode, "__gekf2");
17830 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17831 set_optab_libfunc (le_optab, mode, "__lekf2");
17832 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17834 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17835 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17836 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17837 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17839 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17840 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17841 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17843 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17844 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17845 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17847 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17848 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17849 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17850 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17851 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17852 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17854 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17855 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17856 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17857 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17859 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17860 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17861 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17862 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17864 if (TARGET_POWERPC64)
17866 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17867 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17868 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17869 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17873 else
17875 set_optab_libfunc (add_optab, mode, "_q_add");
17876 set_optab_libfunc (sub_optab, mode, "_q_sub");
17877 set_optab_libfunc (neg_optab, mode, "_q_neg");
17878 set_optab_libfunc (smul_optab, mode, "_q_mul");
17879 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17880 if (TARGET_PPC_GPOPT)
17881 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17883 set_optab_libfunc (eq_optab, mode, "_q_feq");
17884 set_optab_libfunc (ne_optab, mode, "_q_fne");
17885 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17886 set_optab_libfunc (ge_optab, mode, "_q_fge");
17887 set_optab_libfunc (lt_optab, mode, "_q_flt");
17888 set_optab_libfunc (le_optab, mode, "_q_fle");
17890 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17891 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17892 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17893 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17894 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17895 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17896 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17897 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17901 static void
17902 rs6000_init_libfuncs (void)
17904 /* __float128 support. */
17905 if (TARGET_FLOAT128_TYPE)
17907 init_float128_ibm (IFmode);
17908 init_float128_ieee (KFmode);
17911 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17912 if (TARGET_LONG_DOUBLE_128)
17914 if (!TARGET_IEEEQUAD)
17915 init_float128_ibm (TFmode);
17917 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17918 else
17919 init_float128_ieee (TFmode);
17923 /* Emit a potentially record-form instruction, setting DST from SRC.
17924 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17925 signed comparison of DST with zero. If DOT is 1, the generated RTL
17926 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17927 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
17928 a separate COMPARE. */
17930 void
17931 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17933 if (dot == 0)
17935 emit_move_insn (dst, src);
17936 return;
17939 if (cc_reg_not_cr0_operand (ccreg, CCmode))
17941 emit_move_insn (dst, src);
17942 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
17943 return;
17946 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
17947 if (dot == 1)
17949 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
17950 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
17952 else
17954 rtx set = gen_rtx_SET (dst, src);
17955 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
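/* Editor's illustration (not part of the original file): for dot == 1 the
   PARALLEL emitted above has the shape

     (parallel [(set (reg:CC ccreg) (compare:CC src (const_int 0)))
                (clobber dst)])

   while dot == 2 replaces the CLOBBER with (set dst src), matching the
   record-form ("dot") patterns in rs6000.md.  */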
17960 /* A validation routine: say whether CODE, a condition code, and MODE
17961 match. The other alternatives either don't make sense or should
17962 never be generated. */
17964 void
17965 validate_condition_mode (enum rtx_code code, machine_mode mode)
17967 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
17968 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
17969 && GET_MODE_CLASS (mode) == MODE_CC);
17971 /* These don't make sense. */
17972 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
17973 || mode != CCUNSmode);
17975 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
17976 || mode == CCUNSmode);
17978 gcc_assert (mode == CCFPmode
17979 || (code != ORDERED && code != UNORDERED
17980 && code != UNEQ && code != LTGT
17981 && code != UNGT && code != UNLT
17982 && code != UNGE && code != UNLE));
17984 /* These should never be generated except for
17985 flag_finite_math_only. */
17986 gcc_assert (mode != CCFPmode
17987 || flag_finite_math_only
17988 || (code != LE && code != GE
17989 && code != UNEQ && code != LTGT
17990 && code != UNGT && code != UNLT));
17992 /* These are invalid; the information is not there. */
17993 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
17997 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
17998 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
17999 not zero, store there the bit offset (counted from the right) where
18000 the single stretch of 1 bits begins; and similarly for B, the bit
18001 offset where it ends. */
18003 bool
18004 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18006 unsigned HOST_WIDE_INT val = INTVAL (mask);
18007 unsigned HOST_WIDE_INT bit;
18008 int nb, ne;
18009 int n = GET_MODE_PRECISION (mode);
18011 if (mode != DImode && mode != SImode)
18012 return false;
18014 if (INTVAL (mask) >= 0)
18016 bit = val & -val;
18017 ne = exact_log2 (bit);
18018 nb = exact_log2 (val + bit);
18020 else if (val + 1 == 0)
18022 nb = n;
18023 ne = 0;
18025 else if (val & 1)
18027 val = ~val;
18028 bit = val & -val;
18029 nb = exact_log2 (bit);
18030 ne = exact_log2 (val + bit);
18032 else
18034 bit = val & -val;
18035 ne = exact_log2 (bit);
18036 if (val + bit == 0)
18037 nb = n;
18038 else
18039 nb = 0;
18042 nb--;
18044 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18045 return false;
18047 if (b)
18048 *b = nb;
18049 if (e)
18050 *e = ne;
18052 return true;
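/* Editor's sketch (an assumption, not part of GCC): the core bit trick used
   above, restated on a plain 64-bit integer.  A nonzero VAL is one contiguous
   run of ones iff adding its lowest set bit makes the carry ripple through
   the entire run; rs6000_is_valid_mask additionally accepts wrap-around runs
   by complementing VAL first.  */

static inline int
contiguous_ones_example_p (unsigned long long val)
{
  unsigned long long bit = val & -val;	/* isolate the lowest set bit */
  return ((val + bit) & val) == 0;	/* did the carry clear the run?  */
}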
18055 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18056 or rldicr instruction, to implement an AND with it in mode MODE. */
18058 bool
18059 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18061 int nb, ne;
18063 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18064 return false;
18066 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18067 does not wrap. */
18068 if (mode == DImode)
18069 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18071 /* For SImode, rlwinm can do everything. */
18072 if (mode == SImode)
18073 return (nb < 32 && ne < 32);
18075 return false;
18078 /* Return the instruction template for an AND with mask in mode MODE, with
18079 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18081 const char *
18082 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18084 int nb, ne;
18086 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18087 gcc_unreachable ();
18089 if (mode == DImode && ne == 0)
18091 operands[3] = GEN_INT (63 - nb);
18092 if (dot)
18093 return "rldicl. %0,%1,0,%3";
18094 return "rldicl %0,%1,0,%3";
18097 if (mode == DImode && nb == 63)
18099 operands[3] = GEN_INT (63 - ne);
18100 if (dot)
18101 return "rldicr. %0,%1,0,%3";
18102 return "rldicr %0,%1,0,%3";
18105 if (nb < 32 && ne < 32)
18107 operands[3] = GEN_INT (31 - nb);
18108 operands[4] = GEN_INT (31 - ne);
18109 if (dot)
18110 return "rlwinm. %0,%1,0,%3,%4";
18111 return "rlwinm %0,%1,0,%3,%4";
18114 gcc_unreachable ();
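/* Editor's worked examples for the template selection above (illustrative,
   not from the original source):

     DImode mask 0x00000000000000ff:  ne = 0,  nb = 7   ->  "rldicl %0,%1,0,56"
     DImode mask 0xffffffff00000000:  ne = 32, nb = 63  ->  "rldicr %0,%1,0,31"
     SImode mask 0x0000ff00:          ne = 8,  nb = 15  ->  "rlwinm %0,%1,0,16,23"

   IBM bit numbering counts from the left, hence the 63 - nb, 63 - ne,
   31 - nb, and 31 - ne conversions above.  */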
18117 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18118 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18119 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18121 bool
18122 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18124 int nb, ne;
18126 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18127 return false;
18129 int n = GET_MODE_PRECISION (mode);
18130 int sh = -1;
18132 if (CONST_INT_P (XEXP (shift, 1)))
18134 sh = INTVAL (XEXP (shift, 1));
18135 if (sh < 0 || sh >= n)
18136 return false;
18139 rtx_code code = GET_CODE (shift);
18141 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18142 if (sh == 0)
18143 code = ROTATE;
18145 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18146 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18147 code = ASHIFT;
18148 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18150 code = LSHIFTRT;
18151 sh = n - sh;
18154 /* DImode rotates need rld*. */
18155 if (mode == DImode && code == ROTATE)
18156 return (nb == 63 || ne == 0 || ne == sh);
18158 /* SImode rotates need rlw*. */
18159 if (mode == SImode && code == ROTATE)
18160 return (nb < 32 && ne < 32 && sh < 32);
18162 /* Wrap-around masks are only okay for rotates. */
18163 if (ne > nb)
18164 return false;
18166 /* Variable shifts are only okay for rotates. */
18167 if (sh < 0)
18168 return false;
18170 /* Don't allow ASHIFT if the mask is wrong for that. */
18171 if (code == ASHIFT && ne < sh)
18172 return false;
18174 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18175 if the mask is wrong for that. */
18176 if (nb < 32 && ne < 32 && sh < 32
18177 && !(code == LSHIFTRT && nb >= 32 - sh))
18178 return true;
18180 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18181 if the mask is wrong for that. */
18182 if (code == LSHIFTRT)
18183 sh = 64 - sh;
18184 if (nb == 63 || ne == 0 || ne == sh)
18185 return !(code == LSHIFTRT && nb >= sh);
18187 return false;
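/* Editor's example (illustrative, not from the original source): an ASHIFT by
   SH leaves SH zero bits at the bottom, so the mask must not demand any of
   them; that is the ne < sh rejection above.  For (ashift x 8) in SImode,
   mask 0x0000ff00 (ne = 8) is acceptable while 0x0000fff0 (ne = 4) is not.  */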
18190 /* Return the instruction template for a shift with mask in mode MODE, with
18191 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18193 const char *
18194 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18196 int nb, ne;
18198 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18199 gcc_unreachable ();
18201 if (mode == DImode && ne == 0)
18203 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18204 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18205 operands[3] = GEN_INT (63 - nb);
18206 if (dot)
18207 return "rld%I2cl. %0,%1,%2,%3";
18208 return "rld%I2cl %0,%1,%2,%3";
18211 if (mode == DImode && nb == 63)
18213 operands[3] = GEN_INT (63 - ne);
18214 if (dot)
18215 return "rld%I2cr. %0,%1,%2,%3";
18216 return "rld%I2cr %0,%1,%2,%3";
18219 if (mode == DImode
18220 && GET_CODE (operands[4]) != LSHIFTRT
18221 && CONST_INT_P (operands[2])
18222 && ne == INTVAL (operands[2]))
18224 operands[3] = GEN_INT (63 - nb);
18225 if (dot)
18226 return "rld%I2c. %0,%1,%2,%3";
18227 return "rld%I2c %0,%1,%2,%3";
18230 if (nb < 32 && ne < 32)
18232 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18233 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18234 operands[3] = GEN_INT (31 - nb);
18235 operands[4] = GEN_INT (31 - ne);
18236 /* This insn can also be a 64-bit rotate with mask that really makes
18237 it just a shift right (with mask); the %h below are to adjust for
18238 that situation (shift count is >= 32 in that case). */
18239 if (dot)
18240 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18241 return "rlw%I2nm %0,%1,%h2,%3,%4";
18244 gcc_unreachable ();
18247 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18248 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18249 ASHIFT, or LSHIFTRT) in mode MODE. */
18251 bool
18252 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18254 int nb, ne;
18256 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18257 return false;
18259 int n = GET_MODE_PRECISION (mode);
18261 int sh = INTVAL (XEXP (shift, 1));
18262 if (sh < 0 || sh >= n)
18263 return false;
18265 rtx_code code = GET_CODE (shift);
18267 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18268 if (sh == 0)
18269 code = ROTATE;
18271 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18272 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18273 code = ASHIFT;
18274 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18276 code = LSHIFTRT;
18277 sh = n - sh;
18280 /* DImode rotates need rldimi. */
18281 if (mode == DImode && code == ROTATE)
18282 return (ne == sh);
18284 /* SImode rotates need rlwimi. */
18285 if (mode == SImode && code == ROTATE)
18286 return (nb < 32 && ne < 32 && sh < 32);
18288 /* Wrap-around masks are only okay for rotates. */
18289 if (ne > nb)
18290 return false;
18292 /* Don't allow ASHIFT if the mask is wrong for that. */
18293 if (code == ASHIFT && ne < sh)
18294 return false;
18296 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18297 if the mask is wrong for that. */
18298 if (nb < 32 && ne < 32 && sh < 32
18299 && !(code == LSHIFTRT && nb >= 32 - sh))
18300 return true;
18302 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18303 if the mask is wrong for that. */
18304 if (code == LSHIFTRT)
18305 sh = 64 - sh;
18306 if (ne == sh)
18307 return !(code == LSHIFTRT && nb >= sh);
18309 return false;
18312 /* Return the instruction template for an insert with mask in mode MODE, with
18313 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18315 const char *
18316 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18318 int nb, ne;
18320 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18321 gcc_unreachable ();
18323 /* Prefer rldimi because rlwimi is cracked. */
18324 if (TARGET_POWERPC64
18325 && (!dot || mode == DImode)
18326 && GET_CODE (operands[4]) != LSHIFTRT
18327 && ne == INTVAL (operands[2]))
18329 operands[3] = GEN_INT (63 - nb);
18330 if (dot)
18331 return "rldimi. %0,%1,%2,%3";
18332 return "rldimi %0,%1,%2,%3";
18335 if (nb < 32 && ne < 32)
18337 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18338 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18339 operands[3] = GEN_INT (31 - nb);
18340 operands[4] = GEN_INT (31 - ne);
18341 if (dot)
18342 return "rlwimi. %0,%1,%2,%3,%4";
18343 return "rlwimi %0,%1,%2,%3,%4";
18346 gcc_unreachable ();
18349 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18350 using two machine instructions. */
18352 bool
18353 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18355 /* There are two kinds of AND we can handle with two insns:
18356 1) those we can do with two rl* insns;
18357 2) ori[s];xori[s].
18359 We do not handle that last case yet. */
18361 /* If there is just one stretch of ones, we can do it. */
18362 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18363 return true;
18365 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18366 one insn, we can do the whole thing with two. */
18367 unsigned HOST_WIDE_INT val = INTVAL (c);
18368 unsigned HOST_WIDE_INT bit1 = val & -val;
18369 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18370 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18371 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18372 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
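/* Editor's worked example of the hole-filling arithmetic above (not in the
   original source).  For val = 0xf0f0:

     bit1 = val & -val          = 0x0010   (lowest set bit)
     bit2 = (val + bit1) & ~val = 0x0100   (bottom of the lowest hole)
     val1 = (val + bit1) & val  = 0xf000   (the run above the hole)
     bit3 = val1 & -val1        = 0x1000   (first bit past the hole)

   val + bit3 - bit2 = 0xfff0 fills the hole, leaving a single stretch of
   ones that one rl* instruction can handle.  */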
18375 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18376 If EXPAND is true, split rotate-and-mask instructions we generate to
18377 their constituent parts as well (this is used during expand); if DOT
18378 is 1, make the last insn a record-form instruction clobbering the
18379 destination GPR and setting the CC reg (from operands[3]); if 2, set
18380 that GPR as well as the CC reg. */
18382 void
18383 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18385 gcc_assert (!(expand && dot));
18387 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18389 /* If it is one stretch of ones, it must be DImode (a single SImode stretch
18390 is always a valid rlwinm mask); shift left, mask, then shift right.  This
18391 generates better code than masking without shifts, or shifting right first. */
18392 int nb, ne;
18393 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18395 gcc_assert (mode == DImode);
18397 int shift = 63 - nb;
18398 if (expand)
18400 rtx tmp1 = gen_reg_rtx (DImode);
18401 rtx tmp2 = gen_reg_rtx (DImode);
18402 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18403 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18404 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18406 else
18408 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18409 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18410 emit_move_insn (operands[0], tmp);
18411 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18412 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18414 return;
18417 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18418 that does the rest. */
18419 unsigned HOST_WIDE_INT bit1 = val & -val;
18420 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18421 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18422 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18424 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18425 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18427 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18429 /* Two "no-rotate"-and-mask instructions, for SImode. */
18430 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18432 gcc_assert (mode == SImode);
18434 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18435 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18436 emit_move_insn (reg, tmp);
18437 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18438 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18439 return;
18442 gcc_assert (mode == DImode);
18444 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18445 insns; we have to do the first in SImode, because it wraps. */
18446 if (mask2 <= 0xffffffff
18447 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18449 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18450 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18451 GEN_INT (mask1));
18452 rtx reg_low = gen_lowpart (SImode, reg);
18453 emit_move_insn (reg_low, tmp);
18454 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18455 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18456 return;
18459 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18460 at the top end), rotate back and clear the other hole. */
18461 int right = exact_log2 (bit3);
18462 int left = 64 - right;
18464 /* Rotate the mask too. */
18465 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18467 if (expand)
18469 rtx tmp1 = gen_reg_rtx (DImode);
18470 rtx tmp2 = gen_reg_rtx (DImode);
18471 rtx tmp3 = gen_reg_rtx (DImode);
18472 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18473 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18474 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18475 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18477 else
18479 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18480 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18481 emit_move_insn (operands[0], tmp);
18482 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18483 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18484 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
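/* Editor's illustration of a possible emitted sequence (an assumption, not
   from the original source): continuing the val = 0xf0f0 example in SImode,
   mask2 = 0xfff0 and mask1 = 0xfffff0ff, so the two insns are

     rlwinm %0,%1,0,24,19      (AND with wrap-around mask 0xfffff0ff)
     rlwinm %0,%0,0,16,27      (AND with mask 0x0000fff0)

   and 0xfffff0ff & 0x0000fff0 = 0x0000f0f0 is the requested mask.  */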
18488 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18489 for lfq and stfq insns iff the registers are hard registers. */
18491 int
18492 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18494 /* We might have been passed a SUBREG. */
18495 if (!REG_P (reg1) || !REG_P (reg2))
18496 return 0;
18498 /* We might have been passed non floating point registers. */
18499 if (!FP_REGNO_P (REGNO (reg1))
18500 || !FP_REGNO_P (REGNO (reg2)))
18501 return 0;
18503 return (REGNO (reg1) == REGNO (reg2) - 1);
18506 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18507 addr1 and addr2 must be in consecutive memory locations
18508 (addr2 == addr1 + 8). */
18510 int
18511 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18513 rtx addr1, addr2;
18514 unsigned int reg1, reg2;
18515 int offset1, offset2;
18517 /* The mems cannot be volatile. */
18518 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18519 return 0;
18521 addr1 = XEXP (mem1, 0);
18522 addr2 = XEXP (mem2, 0);
18524 /* Extract an offset (if used) from the first addr. */
18525 if (GET_CODE (addr1) == PLUS)
18527 /* If not a REG, return zero. */
18528 if (!REG_P (XEXP (addr1, 0)))
18529 return 0;
18530 else
18532 reg1 = REGNO (XEXP (addr1, 0));
18533 /* The offset must be constant! */
18534 if (!CONST_INT_P (XEXP (addr1, 1)))
18535 return 0;
18536 offset1 = INTVAL (XEXP (addr1, 1));
18539 else if (!REG_P (addr1))
18540 return 0;
18541 else
18543 reg1 = REGNO (addr1);
18544 /* This was a simple (mem (reg)) expression. Offset is 0. */
18545 offset1 = 0;
18548 /* And now for the second addr. */
18549 if (GET_CODE (addr2) == PLUS)
18551 /* If not a REG, return zero. */
18552 if (!REG_P (XEXP (addr2, 0)))
18553 return 0;
18554 else
18556 reg2 = REGNO (XEXP (addr2, 0));
18557 /* The offset must be constant. */
18558 if (!CONST_INT_P (XEXP (addr2, 1)))
18559 return 0;
18560 offset2 = INTVAL (XEXP (addr2, 1));
18563 else if (!REG_P (addr2))
18564 return 0;
18565 else
18567 reg2 = REGNO (addr2);
18568 /* This was a simple (mem (reg)) expression. Offset is 0. */
18569 offset2 = 0;
18572 /* Both of these must have the same base register. */
18573 if (reg1 != reg2)
18574 return 0;
18576 /* The offset for the second addr must be 8 more than the first addr. */
18577 if (offset2 != offset1 + 8)
18578 return 0;
18580 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18581 instructions. */
18582 return 1;
18585 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
18586 need to use DDmode; in all other cases we can use the same mode. */
18587 static machine_mode
18588 rs6000_secondary_memory_needed_mode (machine_mode mode)
18590 if (lra_in_progress && mode == SDmode)
18591 return DDmode;
18592 return mode;
18595 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18596 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18597 only work on the traditional altivec registers, note if an altivec register
18598 was chosen. */
18600 static enum rs6000_reg_type
18601 register_to_reg_type (rtx reg, bool *is_altivec)
18603 HOST_WIDE_INT regno;
18604 enum reg_class rclass;
18606 if (SUBREG_P (reg))
18607 reg = SUBREG_REG (reg);
18609 if (!REG_P (reg))
18610 return NO_REG_TYPE;
18612 regno = REGNO (reg);
18613 if (!HARD_REGISTER_NUM_P (regno))
18615 if (!lra_in_progress && !reload_completed)
18616 return PSEUDO_REG_TYPE;
18618 regno = true_regnum (reg);
18619 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18620 return PSEUDO_REG_TYPE;
18623 gcc_assert (regno >= 0);
18625 if (is_altivec && ALTIVEC_REGNO_P (regno))
18626 *is_altivec = true;
18628 rclass = rs6000_regno_regclass[regno];
18629 return reg_class_to_reg_type[(int)rclass];
18632 /* Helper function to return the cost of adding a TOC entry address. */
18634 static inline int
18635 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18637 int ret;
18639 if (TARGET_CMODEL != CMODEL_SMALL)
18640 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18642 else
18643 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18645 return ret;
18648 /* Helper function for rs6000_secondary_reload to determine whether the memory
18649 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18650 needs reloading. Return negative if the memory is not handled by the memory
18651 helper functions and a different reload method should be tried, 0 if no
18652 additional instructions are needed, and positive to give the extra cost for the
18653 memory. */
18655 static int
18656 rs6000_secondary_reload_memory (rtx addr,
18657 enum reg_class rclass,
18658 machine_mode mode)
18660 int extra_cost = 0;
18661 rtx reg, and_arg, plus_arg0, plus_arg1;
18662 addr_mask_type addr_mask;
18663 const char *type = NULL;
18664 const char *fail_msg = NULL;
18666 if (GPR_REG_CLASS_P (rclass))
18667 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18669 else if (rclass == FLOAT_REGS)
18670 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18672 else if (rclass == ALTIVEC_REGS)
18673 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18675 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18676 else if (rclass == VSX_REGS)
18677 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18678 & ~RELOAD_REG_AND_M16);
18680 /* If the register allocator hasn't made up its mind yet on the register
18681 class to use, settle on defaults. */
18682 else if (rclass == NO_REGS)
18684 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18685 & ~RELOAD_REG_AND_M16);
18687 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18688 addr_mask &= ~(RELOAD_REG_INDEXED
18689 | RELOAD_REG_PRE_INCDEC
18690 | RELOAD_REG_PRE_MODIFY);
18693 else
18694 addr_mask = 0;
18696 /* If the register isn't valid in this register class, just return now. */
18697 if ((addr_mask & RELOAD_REG_VALID) == 0)
18699 if (TARGET_DEBUG_ADDR)
18701 fprintf (stderr,
18702 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18703 "not valid in class\n",
18704 GET_MODE_NAME (mode), reg_class_names[rclass]);
18705 debug_rtx (addr);
18708 return -1;
18711 switch (GET_CODE (addr))
18713 /* Does the register class support auto update forms for this mode? We
18714 don't need a scratch register, since the powerpc only supports
18715 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18716 case PRE_INC:
18717 case PRE_DEC:
18718 reg = XEXP (addr, 0);
18719 if (!base_reg_operand (reg, GET_MODE (reg)))
18721 fail_msg = "no base register #1";
18722 extra_cost = -1;
18725 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18727 extra_cost = 1;
18728 type = "update";
18730 break;
18732 case PRE_MODIFY:
18733 reg = XEXP (addr, 0);
18734 plus_arg1 = XEXP (addr, 1);
18735 if (!base_reg_operand (reg, GET_MODE (reg))
18736 || GET_CODE (plus_arg1) != PLUS
18737 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18739 fail_msg = "bad PRE_MODIFY";
18740 extra_cost = -1;
18743 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18745 extra_cost = 1;
18746 type = "update";
18748 break;
18750 /* Do we need to simulate AND -16 to clear the bottom address bits used
18751 in VMX load/stores? Only allow the AND for vector sizes. */
18752 case AND:
18753 and_arg = XEXP (addr, 0);
18754 if (GET_MODE_SIZE (mode) != 16
18755 || !CONST_INT_P (XEXP (addr, 1))
18756 || INTVAL (XEXP (addr, 1)) != -16)
18758 fail_msg = "bad Altivec AND #1";
18759 extra_cost = -1;
18762 if (rclass != ALTIVEC_REGS)
18764 if (legitimate_indirect_address_p (and_arg, false))
18765 extra_cost = 1;
18767 else if (legitimate_indexed_address_p (and_arg, false))
18768 extra_cost = 2;
18770 else
18772 fail_msg = "bad Altivec AND #2";
18773 extra_cost = -1;
18776 type = "and";
18778 break;
18780 /* If this is an indirect address, make sure it is a base register. */
18781 case REG:
18782 case SUBREG:
18783 if (!legitimate_indirect_address_p (addr, false))
18785 extra_cost = 1;
18786 type = "move";
18788 break;
18790 /* If this is an indexed address, make sure the register class can handle
18791 indexed addresses for this mode. */
18792 case PLUS:
18793 plus_arg0 = XEXP (addr, 0);
18794 plus_arg1 = XEXP (addr, 1);
18796 /* (plus (plus (reg) (constant)) (constant)) is generated during
18797 push_reload processing, so handle it now. */
18798 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18800 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18802 extra_cost = 1;
18803 type = "offset";
18807 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18808 push_reload processing, so handle it now. */
18809 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18811 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18813 extra_cost = 1;
18814 type = "indexed #2";
18818 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18820 fail_msg = "no base register #2";
18821 extra_cost = -1;
18824 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18826 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18827 || !legitimate_indexed_address_p (addr, false))
18829 extra_cost = 1;
18830 type = "indexed";
18834 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18835 && CONST_INT_P (plus_arg1))
18837 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18839 extra_cost = 1;
18840 type = "vector d-form offset";
18844 /* Make sure the register class can handle offset addresses. */
18845 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18847 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18849 extra_cost = 1;
18850 type = "offset #2";
18854 else
18856 fail_msg = "bad PLUS";
18857 extra_cost = -1;
18860 break;
18862 case LO_SUM:
18863 /* Quad offsets are restricted and can't handle normal addresses. */
18864 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18866 extra_cost = -1;
18867 type = "vector d-form lo_sum";
18870 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18872 fail_msg = "bad LO_SUM";
18873 extra_cost = -1;
18876 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18878 extra_cost = 1;
18879 type = "lo_sum";
18881 break;
18883 /* Static addresses need to create a TOC entry. */
18884 case CONST:
18885 case SYMBOL_REF:
18886 case LABEL_REF:
18887 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18889 extra_cost = -1;
18890 type = "vector d-form lo_sum #2";
18893 else
18895 type = "address";
18896 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18898 break;
18900 /* TOC references look like offsetable memory. */
18901 case UNSPEC:
18902 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18904 fail_msg = "bad UNSPEC";
18905 extra_cost = -1;
18908 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18910 extra_cost = -1;
18911 type = "vector d-form lo_sum #3";
18914 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18916 extra_cost = 1;
18917 type = "toc reference";
18919 break;
18921 default:
18923 fail_msg = "bad address";
18924 extra_cost = -1;
18928 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18930 if (extra_cost < 0)
18931 fprintf (stderr,
18932 "rs6000_secondary_reload_memory error: mode = %s, "
18933 "class = %s, addr_mask = '%s', %s\n",
18934 GET_MODE_NAME (mode),
18935 reg_class_names[rclass],
18936 rs6000_debug_addr_mask (addr_mask, false),
18937 (fail_msg != NULL) ? fail_msg : "<bad address>");
18939 else
18940 fprintf (stderr,
18941 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18942 "addr_mask = '%s', extra cost = %d, %s\n",
18943 GET_MODE_NAME (mode),
18944 reg_class_names[rclass],
18945 rs6000_debug_addr_mask (addr_mask, false),
18946 extra_cost,
18947 (type) ? type : "<none>");
18949 debug_rtx (addr);
18952 return extra_cost;
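/* Editor's example (an illustrative assumption): reloading a V4SImode value
   into FLOAT_REGS from the Altivec-style address (and (plus r3 r4) -16)
   takes the AND case above; the indexed sum must first be computed into a
   base register, so the function reports an extra cost of 2.  */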
18955 /* Helper function for rs6000_secondary_reload to return true if a move to a
18956 different register class is really a simple move. */
18958 static bool
18959 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
18960 enum rs6000_reg_type from_type,
18961 machine_mode mode)
18963 int size = GET_MODE_SIZE (mode);
18965 /* Add support for various direct moves available. In this function, we only
18966 look at cases where we don't need any extra registers, and one or more
18967 simple move insns are issued. Originally, small integers were not allowed
18968 in FPR/VSX registers. Single precision binary floating point is not a simple
18969 move because we need to convert to the single precision memory layout.
18970 The 4-byte SDmode can be moved. TDmode values are disallowed since they
18971 need special direct move handling, which we do not support yet. */
18972 if (TARGET_DIRECT_MOVE
18973 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18974 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
18976 if (TARGET_POWERPC64)
18978 /* ISA 2.07: MTVSRD or MFVSRD. */
18979 if (size == 8)
18980 return true;
18982 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
18983 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
18984 return true;
18987 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18988 if (TARGET_P8_VECTOR)
18990 if (mode == SImode)
18991 return true;
18993 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
18994 return true;
18997 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18998 if (mode == SDmode)
18999 return true;
19002 /* Move to/from SPR. */
19003 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19004 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19005 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19006 return true;
19008 return false;
19011 /* Direct move helper function for rs6000_secondary_reload.  Handle all of the
19012 special direct moves that involve allocating an extra register.  Return true
19013 and fill in SRI with the insn code and cost of the helper function if there
19014 is such a function, or false if not. */
19016 static bool
19017 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19018 enum rs6000_reg_type from_type,
19019 machine_mode mode,
19020 secondary_reload_info *sri,
19021 bool altivec_p)
19023 bool ret = false;
19024 enum insn_code icode = CODE_FOR_nothing;
19025 int cost = 0;
19026 int size = GET_MODE_SIZE (mode);
19028 if (TARGET_POWERPC64 && size == 16)
19030 /* Handle moving 128-bit values from GPRs to VSX registers on
19031 ISA 2.07 (power8, power9) when running in 64-bit mode using
19032 XXPERMDI to glue the two 64-bit values back together. */
19033 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19035 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19036 icode = reg_addr[mode].reload_vsx_gpr;
19039 /* Handle moving 128-bit values from VSX registers to GPRs on
19040 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19041 bottom 64-bit value. */
19042 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19044 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19045 icode = reg_addr[mode].reload_gpr_vsx;
19049 else if (TARGET_POWERPC64 && mode == SFmode)
19051 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19053 cost = 3; /* xscvdpspn, mfvsrd, and. */
19054 icode = reg_addr[mode].reload_gpr_vsx;
19057 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19059 cost = 2; /* mtvsrz, xscvspdpn. */
19060 icode = reg_addr[mode].reload_vsx_gpr;
19064 else if (!TARGET_POWERPC64 && size == 8)
19066 /* Handle moving 64-bit values from GPRs to floating point registers on
19067 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19068 32-bit values back together. Altivec register classes must be handled
19069 specially since a different instruction is used, and the secondary
19070 reload support requires a single instruction class in the scratch
19071 register constraint. However, right now TFmode is not allowed in
19072 Altivec registers, so the pattern will never match. */
19073 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19075 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19076 icode = reg_addr[mode].reload_fpr_gpr;
19080 if (icode != CODE_FOR_nothing)
19082 ret = true;
19083 if (sri)
19085 sri->icode = icode;
19086 sri->extra_cost = cost;
19090 return ret;
19093 /* Return whether a move between two register classes can be done either
19094 directly (simple move) or via a pattern that uses a single extra temporary
19095 (using ISA 2.07's direct move in this case). */
19097 static bool
19098 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19099 enum rs6000_reg_type from_type,
19100 machine_mode mode,
19101 secondary_reload_info *sri,
19102 bool altivec_p)
19104 /* Fall back to load/store reloads if either type is not a register. */
19105 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19106 return false;
19108 /* If we haven't allocated registers yet, assume the move can be done for the
19109 standard register types. */
19110 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19111 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19112 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19113 return true;
19115 /* A move within the same set of registers is a simple move for non-specialized
19116 registers. */
19117 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19118 return true;
19120 /* Check whether a simple move can be done directly. */
19121 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19123 if (sri)
19125 sri->icode = CODE_FOR_nothing;
19126 sri->extra_cost = 0;
19128 return true;
19131 /* Now check if we can do it in a few steps. */
19132 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19133 altivec_p);
19136 /* Inform reload about cases where moving X with a mode MODE to a register in
19137 RCLASS requires an extra scratch or immediate register. Return the class
19138 needed for the immediate register.
19140 For VSX and Altivec, we may need a register to convert sp+offset into
19141 reg+sp.
19143 For misaligned 64-bit gpr loads and stores we need a register to
19144 convert an offset address to indirect. */
19146 static reg_class_t
19147 rs6000_secondary_reload (bool in_p,
19148 rtx x,
19149 reg_class_t rclass_i,
19150 machine_mode mode,
19151 secondary_reload_info *sri)
19153 enum reg_class rclass = (enum reg_class) rclass_i;
19154 reg_class_t ret = ALL_REGS;
19155 enum insn_code icode;
19156 bool default_p = false;
19157 bool done_p = false;
19159 /* Allow subreg of memory before/during reload. */
19160 bool memory_p = (MEM_P (x)
19161 || (!reload_completed && SUBREG_P (x)
19162 && MEM_P (SUBREG_REG (x))));
19164 sri->icode = CODE_FOR_nothing;
19165 sri->t_icode = CODE_FOR_nothing;
19166 sri->extra_cost = 0;
19167 icode = ((in_p)
19168 ? reg_addr[mode].reload_load
19169 : reg_addr[mode].reload_store);
19171 if (REG_P (x) || register_operand (x, mode))
19173 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19174 bool altivec_p = (rclass == ALTIVEC_REGS);
19175 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19177 if (!in_p)
19178 std::swap (to_type, from_type);
19180 /* Can we do a direct move of some sort? */
19181 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19182 altivec_p))
19184 icode = (enum insn_code)sri->icode;
19185 default_p = false;
19186 done_p = true;
19187 ret = NO_REGS;
19191 /* Make sure 0.0 is not reloaded or forced into memory. */
19192 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19194 ret = NO_REGS;
19195 default_p = false;
19196 done_p = true;
19199 /* If this is a scalar floating point value and we want to load it into the
19200 traditional Altivec registers, do it via a move via a traditional floating
19201 point register, unless we have D-form addressing. Also make sure that
19202 non-zero constants use a FPR. */
19203 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19204 && !mode_supports_vmx_dform (mode)
19205 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19206 && (memory_p || CONST_DOUBLE_P (x)))
19208 ret = FLOAT_REGS;
19209 default_p = false;
19210 done_p = true;
19213 /* Handle reload of load/stores if we have reload helper functions. */
19214 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19216 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19217 mode);
19219 if (extra_cost >= 0)
19221 done_p = true;
19222 ret = NO_REGS;
19223 if (extra_cost > 0)
19225 sri->extra_cost = extra_cost;
19226 sri->icode = icode;
19231 /* Handle unaligned loads and stores of integer registers. */
19232 if (!done_p && TARGET_POWERPC64
19233 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19234 && memory_p
19235 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19237 rtx addr = XEXP (x, 0);
19238 rtx off = address_offset (addr);
19240 if (off != NULL_RTX)
19242 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19243 unsigned HOST_WIDE_INT offset = INTVAL (off);
19245 /* We need a secondary reload when our legitimate_address_p
19246 says the address is good (as otherwise the entire address
19247 will be reloaded), and the offset is not a multiple of
19248 four or we have an address wrap. Address wrap will only
19249 occur for LO_SUMs since legitimate_offset_address_p
19250 rejects addresses for 16-byte mems that will wrap. */
19251 if (GET_CODE (addr) == LO_SUM
19252 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19253 && ((offset & 3) != 0
19254 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19255 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19256 && (offset & 3) != 0))
19258 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19259 if (in_p)
19260 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19261 : CODE_FOR_reload_di_load);
19262 else
19263 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19264 : CODE_FOR_reload_di_store);
19265 sri->extra_cost = 2;
19266 ret = NO_REGS;
19267 done_p = true;
19269 else
19270 default_p = true;
19272 else
19273 default_p = true;
19276 if (!done_p && !TARGET_POWERPC64
19277 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19278 && memory_p
19279 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19281 rtx addr = XEXP (x, 0);
19282 rtx off = address_offset (addr);
19284 if (off != NULL_RTX)
19286 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19287 unsigned HOST_WIDE_INT offset = INTVAL (off);
19289 /* We need a secondary reload when our legitimate_address_p
19290 says the address is good (as otherwise the entire address
19291 will be reloaded), and we have a wrap.
19293 legitimate_lo_sum_address_p allows LO_SUM addresses to
19294 have any offset so test for wrap in the low 16 bits.
19296 legitimate_offset_address_p checks for the range
19297 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19298 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19299 [0x7ff4,0x7fff] respectively, so test for the
19300 intersection of these ranges, [0x7ffc,0x7fff] and
19301 [0x7ff4,0x7ff7] respectively.
19303 Note that the address we see here may have been
19304 manipulated by legitimize_reload_address. */
19305 if (GET_CODE (addr) == LO_SUM
19306 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19307 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19309 if (in_p)
19310 sri->icode = CODE_FOR_reload_si_load;
19311 else
19312 sri->icode = CODE_FOR_reload_si_store;
19313 sri->extra_cost = 2;
19314 ret = NO_REGS;
19315 done_p = true;
19317 else
19318 default_p = true;
19320 else
19321 default_p = true;
19324 if (!done_p)
19325 default_p = true;
19327 if (default_p)
19328 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19330 gcc_assert (ret != ALL_REGS);
19332 if (TARGET_DEBUG_ADDR)
19334 fprintf (stderr,
19335 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19336 "mode = %s",
19337 reg_class_names[ret],
19338 in_p ? "true" : "false",
19339 reg_class_names[rclass],
19340 GET_MODE_NAME (mode));
19342 if (reload_completed)
19343 fputs (", after reload", stderr);
19345 if (!done_p)
19346 fputs (", done_p not set", stderr);
19348 if (default_p)
19349 fputs (", default secondary reload", stderr);
19351 if (sri->icode != CODE_FOR_nothing)
19352 fprintf (stderr, ", reload func = %s, extra cost = %d",
19353 insn_data[sri->icode].name, sri->extra_cost);
19355 else if (sri->extra_cost > 0)
19356 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19358 fputs ("\n", stderr);
19359 debug_rtx (x);
19362 return ret;
19365 /* Better tracing for rs6000_secondary_reload_inner. */
19367 static void
19368 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19369 bool store_p)
19371 rtx set, clobber;
19373 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19375 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19376 store_p ? "store" : "load");
19378 if (store_p)
19379 set = gen_rtx_SET (mem, reg);
19380 else
19381 set = gen_rtx_SET (reg, mem);
19383 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19384 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19387 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19388 ATTRIBUTE_NORETURN;
19390 static void
19391 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19392 bool store_p)
19394 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19395 gcc_unreachable ();
19398 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
19399 reload helper functions. These were identified in
19400 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19401 reload, it calls the insns:
19402 reload_<RELOAD:mode>_<P:mptrsize>_store
19403 reload_<RELOAD:mode>_<P:mptrsize>_load
19405 which in turn calls this function, to do whatever is necessary to create
19406 valid addresses. */
19408 void
19409 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19411 int regno = true_regnum (reg);
19412 machine_mode mode = GET_MODE (reg);
19413 addr_mask_type addr_mask;
19414 rtx addr;
19415 rtx new_addr;
19416 rtx op_reg, op0, op1;
19417 rtx and_op;
19418 rtx cc_clobber;
19419 rtvec rv;
19421 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19422 || !base_reg_operand (scratch, GET_MODE (scratch)))
19423 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19425 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19426 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19428 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19429 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19431 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19432 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19434 else
19435 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19437 /* Make sure the mode is valid in this register class. */
19438 if ((addr_mask & RELOAD_REG_VALID) == 0)
19439 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19441 if (TARGET_DEBUG_ADDR)
19442 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19444 new_addr = addr = XEXP (mem, 0);
19445 switch (GET_CODE (addr))
19447 /* Does the register class support auto update forms for this mode? If
19448 not, do the update now. We don't need a scratch register, since the
19449 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19450 case PRE_INC:
19451 case PRE_DEC:
19452 op_reg = XEXP (addr, 0);
19453 if (!base_reg_operand (op_reg, Pmode))
19454 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19456 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19458 int delta = GET_MODE_SIZE (mode);
19459 if (GET_CODE (addr) == PRE_DEC)
19460 delta = -delta;
19461 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19462 new_addr = op_reg;
19464 break;
19466 case PRE_MODIFY:
19467 op0 = XEXP (addr, 0);
19468 op1 = XEXP (addr, 1);
19469 if (!base_reg_operand (op0, Pmode)
19470 || GET_CODE (op1) != PLUS
19471 || !rtx_equal_p (op0, XEXP (op1, 0)))
19472 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19474 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19476 emit_insn (gen_rtx_SET (op0, op1));
19477 new_addr = op0;
19479 break;
19481 /* Do we need to simulate AND -16 to clear the bottom address bits used
19482 in VMX load/stores? */
19483 case AND:
19484 op0 = XEXP (addr, 0);
19485 op1 = XEXP (addr, 1);
19486 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19488 if (REG_P (op0) || SUBREG_P (op0))
19489 op_reg = op0;
19491 else if (GET_CODE (op1) == PLUS)
19493 emit_insn (gen_rtx_SET (scratch, op1));
19494 op_reg = scratch;
19497 else
19498 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19500 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19501 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19502 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19503 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19504 new_addr = scratch;
19506 break;
19508 /* If this is an indirect address, make sure it is a base register. */
19509 case REG:
19510 case SUBREG:
19511 if (!base_reg_operand (addr, GET_MODE (addr)))
19513 emit_insn (gen_rtx_SET (scratch, addr));
19514 new_addr = scratch;
19516 break;
19518 /* If this is an indexed address, make sure the register class can handle
19519 indexed addresses for this mode. */
19520 case PLUS:
19521 op0 = XEXP (addr, 0);
19522 op1 = XEXP (addr, 1);
19523 if (!base_reg_operand (op0, Pmode))
19524 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19526 else if (int_reg_operand (op1, Pmode))
19528 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19530 emit_insn (gen_rtx_SET (scratch, addr));
19531 new_addr = scratch;
19535 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19537 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19538 || !quad_address_p (addr, mode, false))
19540 emit_insn (gen_rtx_SET (scratch, addr));
19541 new_addr = scratch;
19545 /* Make sure the register class can handle offset addresses. */
19546 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19548 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19550 emit_insn (gen_rtx_SET (scratch, addr));
19551 new_addr = scratch;
19555 else
19556 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19558 break;
19560 case LO_SUM:
19561 op0 = XEXP (addr, 0);
19562 op1 = XEXP (addr, 1);
19563 if (!base_reg_operand (op0, Pmode))
19564 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19566 else if (int_reg_operand (op1, Pmode))
19568 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19570 emit_insn (gen_rtx_SET (scratch, addr));
19571 new_addr = scratch;
19575 /* Quad offsets are restricted and can't handle normal addresses. */
19576 else if (mode_supports_dq_form (mode))
19578 emit_insn (gen_rtx_SET (scratch, addr));
19579 new_addr = scratch;
19582 /* Make sure the register class can handle offset addresses. */
19583 else if (legitimate_lo_sum_address_p (mode, addr, false))
19585 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19587 emit_insn (gen_rtx_SET (scratch, addr));
19588 new_addr = scratch;
19592 else
19593 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19595 break;
19597 case SYMBOL_REF:
19598 case CONST:
19599 case LABEL_REF:
19600 rs6000_emit_move (scratch, addr, Pmode);
19601 new_addr = scratch;
19602 break;
19604 default:
19605 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19608 /* Adjust the address if it changed. */
19609 if (addr != new_addr)
19611 mem = replace_equiv_address_nv (mem, new_addr);
19612 if (TARGET_DEBUG_ADDR)
19613 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19616 /* Now create the move. */
19617 if (store_p)
19618 emit_insn (gen_rtx_SET (mem, reg));
19619 else
19620 emit_insn (gen_rtx_SET (reg, mem));
19622 return;
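/* Aside (illustrative, not part of the original file): every arm of the
   switch above follows one pattern -- test a capability bit for the
   reloaded register class, and if the addressing form is unsupported,
   materialize the address in SCRATCH so the final access is a plain
   indirect load/store.  A minimal self-contained sketch of that decision,
   using a hypothetical CAN_INDEXED flag rather than the real addr_mask
   encoding, kept out of the build with #if 0:  */
#if 0
#include <stdbool.h>

enum { CAN_INDEXED = 1, CAN_OFFSET = 2 };

/* Must a reg+reg (indexed) address be copied into a scratch base
   register before the memory access?  */
static bool
needs_scratch_for_indexed (unsigned addr_mask)
{
  return (addr_mask & CAN_INDEXED) == 0;
}
#endif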
19625 /* Convert reloads involving 64-bit gprs and misaligned offset
19626 addressing, or multiple 32-bit gprs and offsets that are too large,
19627 to use indirect addressing. */
19629 void
19630 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19632 int regno = true_regnum (reg);
19633 enum reg_class rclass;
19634 rtx addr;
19635 rtx scratch_or_premodify = scratch;
19637 if (TARGET_DEBUG_ADDR)
19639 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19640 store_p ? "store" : "load");
19641 fprintf (stderr, "reg:\n");
19642 debug_rtx (reg);
19643 fprintf (stderr, "mem:\n");
19644 debug_rtx (mem);
19645 fprintf (stderr, "scratch:\n");
19646 debug_rtx (scratch);
19649 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19650 gcc_assert (MEM_P (mem));
19651 rclass = REGNO_REG_CLASS (regno);
19652 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19653 addr = XEXP (mem, 0);
19655 if (GET_CODE (addr) == PRE_MODIFY)
19657 gcc_assert (REG_P (XEXP (addr, 0))
19658 && GET_CODE (XEXP (addr, 1)) == PLUS
19659 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19660 scratch_or_premodify = XEXP (addr, 0);
19661 addr = XEXP (addr, 1);
19663 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19665 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19667 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19669 /* Now create the move. */
19670 if (store_p)
19671 emit_insn (gen_rtx_SET (mem, reg));
19672 else
19673 emit_insn (gen_rtx_SET (reg, mem));
19675 return;
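/* Example (illustrative): on a 32-bit target, reloading a DImode GPR pair
   from "(mem (plus (reg 9) (const_int 0x12340)))" cannot use the too-large
   offset for each word, so the code above moves the whole PLUS expression
   into the scratch register and rewrites the access as
   "(mem (reg scratch))".  */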
19678 /* Given an rtx X being reloaded into a reg required to be
19679 in class CLASS, return the class of reg to actually use.
19680 In general this is just CLASS; but on some machines
19681 in some cases it is preferable to use a more restrictive class.
19683 On the RS/6000, we have to return NO_REGS when we want to reload a
19684 floating-point CONST_DOUBLE to force it to be copied to memory.
19686 We also don't want to reload integer values into floating-point
19687 registers if we can at all help it. In fact, this can
19688 cause reload to die, if it tries to generate a reload of CTR
19689 into a FP register and discovers it doesn't have the memory location
19690 required.
19692 ??? Would it be a good idea to have reload do the converse, that is
19693 try to reload floating modes into FP registers if possible?
19696 static enum reg_class
19697 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19699 machine_mode mode = GET_MODE (x);
19700 bool is_constant = CONSTANT_P (x);
19702 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19703 reload class for it. */
19704 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19705 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19706 return NO_REGS;
19708 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19709 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19710 return NO_REGS;
19712 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19713 the reloading of address expressions using PLUS into floating point
19714 registers. */
19715 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19717 if (is_constant)
19719 /* Zero is always allowed in all VSX registers. */
19720 if (x == CONST0_RTX (mode))
19721 return rclass;
19723 /* If this is a vector constant that can be formed with a few Altivec
19724 instructions, we want altivec registers. */
19725 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19726 return ALTIVEC_REGS;
19728 /* If this is an integer constant that can easily be loaded into
19729 vector registers, allow it. */
19730 if (CONST_INT_P (x))
19732 HOST_WIDE_INT value = INTVAL (x);
19734 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19735 2.06 can generate it in the Altivec registers with
19736 VSPLTI<x>. */
19737 if (value == -1)
19739 if (TARGET_P8_VECTOR)
19740 return rclass;
19741 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19742 return ALTIVEC_REGS;
19743 else
19744 return NO_REGS;
19747 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19748 a sign extend in the Altivec registers. */
19749 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19750 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19751 return ALTIVEC_REGS;
19754 /* Force constant to memory. */
19755 return NO_REGS;
19758 /* D-form addressing can easily reload the value. */
19759 if (mode_supports_vmx_dform (mode)
19760 || mode_supports_dq_form (mode))
19761 return rclass;
19763 /* If this is a scalar floating point value and we don't have D-form
19764 addressing, prefer the traditional floating point registers so that we
19765 can use D-form (register+offset) addressing. */
19766 if (rclass == VSX_REGS
19767 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19768 return FLOAT_REGS;
19770 /* Prefer the Altivec registers if Altivec is handling the vector
19771 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19772 loads. */
19773 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19774 || mode == V1TImode)
19775 return ALTIVEC_REGS;
19777 return rclass;
19780 if (is_constant || GET_CODE (x) == PLUS)
19782 if (reg_class_subset_p (GENERAL_REGS, rclass))
19783 return GENERAL_REGS;
19784 if (reg_class_subset_p (BASE_REGS, rclass))
19785 return BASE_REGS;
19786 return NO_REGS;
19789 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19790 return GENERAL_REGS;
19792 return rclass;
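/* Aside (illustrative, not from the original source): the constant cases
   above reduce to a small decision table.  A self-contained sketch, with
   the ISA levels passed in as plain booleans rather than the real
   TARGET_P8_VECTOR/TARGET_P9_VECTOR flags, compiled out with #if 0:  */
#if 0
#include <stdbool.h>

/* Where should an easy integer constant destined for a VSX register live?
   Mirrors the -1 and -128..127 cases handled above.  */
static const char *
vsx_constant_home (long value, bool isa_2_07, bool isa_3_0)
{
  if (value == -1)                      /* XXLORC or VSPLTI<x>.  */
    return isa_2_07 ? "any VSX register" : "Altivec register";
  if (value >= -128 && value <= 127 && isa_3_0)
    return "Altivec register";          /* XXSPLTIB + sign extend.  */
  return "memory";                      /* Forced to memory.  */
}
#endif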
19795 /* Debug version of rs6000_preferred_reload_class. */
19796 static enum reg_class
19797 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19799 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19801 fprintf (stderr,
19802 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19803 "mode = %s, x:\n",
19804 reg_class_names[ret], reg_class_names[rclass],
19805 GET_MODE_NAME (GET_MODE (x)));
19806 debug_rtx (x);
19808 return ret;
19811 /* If we are copying between FP or AltiVec registers and anything else, we need
19812 a memory location. The exception is when we are targeting ppc64 and the
19813 direct move instructions between the FPRs and GPRs are available. Also, under VSX, you
19814 can copy vector registers from the FP register set to the Altivec register
19815 set and vice versa. */
19817 static bool
19818 rs6000_secondary_memory_needed (machine_mode mode,
19819 reg_class_t from_class,
19820 reg_class_t to_class)
19822 enum rs6000_reg_type from_type, to_type;
19823 bool altivec_p = ((from_class == ALTIVEC_REGS)
19824 || (to_class == ALTIVEC_REGS));
19826 /* If a simple/direct move is available, we don't need secondary memory. */
19827 from_type = reg_class_to_reg_type[(int)from_class];
19828 to_type = reg_class_to_reg_type[(int)to_class];
19830 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19831 (secondary_reload_info *)0, altivec_p))
19832 return false;
19834 /* If we have a floating point or vector register class, we need to use
19835 memory to transfer the data. */
19836 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19837 return true;
19839 return false;
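/* Example (illustrative): copying an SImode value from a GPR to an FPR on a
   target without the direct-move instructions has no register-to-register
   path, so the function above returns true and the value bounces through a
   stack slot (store from the GPR, load into the FPR).  */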
19842 /* Debug version of rs6000_secondary_memory_needed. */
19843 static bool
19844 rs6000_debug_secondary_memory_needed (machine_mode mode,
19845 reg_class_t from_class,
19846 reg_class_t to_class)
19848 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19850 fprintf (stderr,
19851 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19852 "to_class = %s, mode = %s\n",
19853 ret ? "true" : "false",
19854 reg_class_names[from_class],
19855 reg_class_names[to_class],
19856 GET_MODE_NAME (mode));
19858 return ret;
19861 /* Return the register class of a scratch register needed to copy IN into
19862 or out of a register in RCLASS in MODE. If it can be done directly,
19863 NO_REGS is returned. */
19865 static enum reg_class
19866 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19867 rtx in)
19869 int regno;
19871 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19872 #if TARGET_MACHO
19873 && MACHOPIC_INDIRECT
19874 #endif
19877 /* We cannot copy a symbolic operand directly into anything
19878 other than BASE_REGS for TARGET_ELF. So indicate that a
19879 register from BASE_REGS is needed as an intermediate
19880 register.
19882 On Darwin, pic addresses require a load from memory, which
19883 needs a base register. */
19884 if (rclass != BASE_REGS
19885 && (SYMBOL_REF_P (in)
19886 || GET_CODE (in) == HIGH
19887 || GET_CODE (in) == LABEL_REF
19888 || GET_CODE (in) == CONST))
19889 return BASE_REGS;
19892 if (REG_P (in))
19894 regno = REGNO (in);
19895 if (!HARD_REGISTER_NUM_P (regno))
19897 regno = true_regnum (in);
19898 if (!HARD_REGISTER_NUM_P (regno))
19899 regno = -1;
19902 else if (SUBREG_P (in))
19904 regno = true_regnum (in);
19905 if (!HARD_REGISTER_NUM_P (regno))
19906 regno = -1;
19908 else
19909 regno = -1;
19911 /* If we have VSX register moves, prefer moving scalar values between
19912 Altivec registers and GPR by going via an FPR (and then via memory)
19913 instead of reloading the secondary memory address for Altivec moves. */
19914 if (TARGET_VSX
19915 && GET_MODE_SIZE (mode) < 16
19916 && !mode_supports_vmx_dform (mode)
19917 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19918 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19919 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19920 && (regno >= 0 && INT_REGNO_P (regno)))))
19921 return FLOAT_REGS;
19923 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19924 into anything. */
19925 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19926 || (regno >= 0 && INT_REGNO_P (regno)))
19927 return NO_REGS;
19929 /* Constants, memory, and VSX registers can go into VSX registers (both the
19930 traditional floating point and the altivec registers). */
19931 if (rclass == VSX_REGS
19932 && (regno == -1 || VSX_REGNO_P (regno)))
19933 return NO_REGS;
19935 /* Constants, memory, and FP registers can go into FP registers. */
19936 if ((regno == -1 || FP_REGNO_P (regno))
19937 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
19938 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
19940 /* Memory, and AltiVec registers can go into AltiVec registers. */
19941 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
19942 && rclass == ALTIVEC_REGS)
19943 return NO_REGS;
19945 /* We can copy among the CR registers. */
19946 if ((rclass == CR_REGS || rclass == CR0_REGS)
19947 && regno >= 0 && CR_REGNO_P (regno))
19948 return NO_REGS;
19950 /* Otherwise, we need GENERAL_REGS. */
19951 return GENERAL_REGS;
19954 /* Debug version of rs6000_secondary_reload_class. */
19955 static enum reg_class
19956 rs6000_debug_secondary_reload_class (enum reg_class rclass,
19957 machine_mode mode, rtx in)
19959 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
19960 fprintf (stderr,
19961 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
19962 "mode = %s, input rtx:\n",
19963 reg_class_names[ret], reg_class_names[rclass],
19964 GET_MODE_NAME (mode));
19965 debug_rtx (in);
19967 return ret;
19970 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
19972 static bool
19973 rs6000_can_change_mode_class (machine_mode from,
19974 machine_mode to,
19975 reg_class_t rclass)
19977 unsigned from_size = GET_MODE_SIZE (from);
19978 unsigned to_size = GET_MODE_SIZE (to);
19980 if (from_size != to_size)
19982 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
19984 if (reg_classes_intersect_p (xclass, rclass))
19986 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
19987 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
19988 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
19989 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
19991 /* Don't allow 64-bit types to overlap with 128-bit types that take a
19992 single register under VSX because the scalar part of the register
19993 is in the upper 64-bits, and not the lower 64-bits. Types like
19994 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
19995 IEEE floating point can't overlap, and neither can small
19996 values. */
19998 if (to_float128_vector_p && from_float128_vector_p)
19999 return true;
20001 else if (to_float128_vector_p || from_float128_vector_p)
20002 return false;
20004 /* TDmode in floating-mode registers must always go into a register
20005 pair with the most significant word in the even-numbered register
20006 to match ISA requirements. In little-endian mode, this does not
20007 match subreg numbering, so we cannot allow subregs. */
20008 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20009 return false;
20011 if (from_size < 8 || to_size < 8)
20012 return false;
20014 if (from_size == 8 && (8 * to_nregs) != to_size)
20015 return false;
20017 if (to_size == 8 && (8 * from_nregs) != from_size)
20018 return false;
20020 return true;
20022 else
20023 return true;
20026 /* Since the VSX register set includes traditional floating point registers
20027 and altivec registers, just check for the size being different instead of
20028 trying to check whether the modes are vector modes. Otherwise it won't
20029 allow say DF and DI to change classes. For types like TFmode and TDmode
20030 that take 2 64-bit registers, rather than a single 128-bit register, don't
20031 allow subregs of those types to other 128-bit types. */
20032 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20034 unsigned num_regs = (from_size + 15) / 16;
20035 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20036 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20037 return false;
20039 return (from_size == 8 || from_size == 16);
20042 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20043 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20044 return false;
20046 return true;
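/* Aside (illustrative): the FPR-overlap rule above can be read as "an
   8-byte mode may only share registers with a mode that is itself split
   into 8-byte pieces".  A self-contained restatement, compiled out:  */
#if 0
#include <stdbool.h>

static bool
fpr_sizes_can_overlap (unsigned from_size, unsigned from_nregs,
                       unsigned to_size, unsigned to_nregs)
{
  if (from_size < 8 || to_size < 8)
    return false;
  /* E.g. DFmode (size 8) may not overlap a 16-byte mode held in a single
     VSX register, but may overlap TDmode (size 16 in two registers).  */
  if (from_size == 8 && 8 * to_nregs != to_size)
    return false;
  if (to_size == 8 && 8 * from_nregs != from_size)
    return false;
  return true;
}
#endif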
20049 /* Debug version of rs6000_can_change_mode_class. */
20050 static bool
20051 rs6000_debug_can_change_mode_class (machine_mode from,
20052 machine_mode to,
20053 reg_class_t rclass)
20055 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20057 fprintf (stderr,
20058 "rs6000_can_change_mode_class, return %s, from = %s, "
20059 "to = %s, rclass = %s\n",
20060 ret ? "true" : "false",
20061 GET_MODE_NAME (from), GET_MODE_NAME (to),
20062 reg_class_names[rclass]);
20064 return ret;
20067 /* Return a string to do a move operation of 128 bits of data. */
20069 const char *
20070 rs6000_output_move_128bit (rtx operands[])
20072 rtx dest = operands[0];
20073 rtx src = operands[1];
20074 machine_mode mode = GET_MODE (dest);
20075 int dest_regno;
20076 int src_regno;
20077 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20078 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20080 if (REG_P (dest))
20082 dest_regno = REGNO (dest);
20083 dest_gpr_p = INT_REGNO_P (dest_regno);
20084 dest_fp_p = FP_REGNO_P (dest_regno);
20085 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20086 dest_vsx_p = dest_fp_p | dest_vmx_p;
20088 else
20090 dest_regno = -1;
20091 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20094 if (REG_P (src))
20096 src_regno = REGNO (src);
20097 src_gpr_p = INT_REGNO_P (src_regno);
20098 src_fp_p = FP_REGNO_P (src_regno);
20099 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20100 src_vsx_p = src_fp_p | src_vmx_p;
20102 else
20104 src_regno = -1;
20105 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20108 /* Register moves. */
20109 if (dest_regno >= 0 && src_regno >= 0)
20111 if (dest_gpr_p)
20113 if (src_gpr_p)
20114 return "#";
20116 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20117 return (WORDS_BIG_ENDIAN
20118 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20119 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20121 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20122 return "#";
20125 else if (TARGET_VSX && dest_vsx_p)
20127 if (src_vsx_p)
20128 return "xxlor %x0,%x1,%x1";
20130 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20131 return (WORDS_BIG_ENDIAN
20132 ? "mtvsrdd %x0,%1,%L1"
20133 : "mtvsrdd %x0,%L1,%1");
20135 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20136 return "#";
20139 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20140 return "vor %0,%1,%1";
20142 else if (dest_fp_p && src_fp_p)
20143 return "#";
20146 /* Loads. */
20147 else if (dest_regno >= 0 && MEM_P (src))
20149 if (dest_gpr_p)
20151 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20152 return "lq %0,%1";
20153 else
20154 return "#";
20157 else if (TARGET_ALTIVEC && dest_vmx_p
20158 && altivec_indexed_or_indirect_operand (src, mode))
20159 return "lvx %0,%y1";
20161 else if (TARGET_VSX && dest_vsx_p)
20163 if (mode_supports_dq_form (mode)
20164 && quad_address_p (XEXP (src, 0), mode, true))
20165 return "lxv %x0,%1";
20167 else if (TARGET_P9_VECTOR)
20168 return "lxvx %x0,%y1";
20170 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20171 return "lxvw4x %x0,%y1";
20173 else
20174 return "lxvd2x %x0,%y1";
20177 else if (TARGET_ALTIVEC && dest_vmx_p)
20178 return "lvx %0,%y1";
20180 else if (dest_fp_p)
20181 return "#";
20184 /* Stores. */
20185 else if (src_regno >= 0 && MEM_P (dest))
20187 if (src_gpr_p)
20189 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20190 return "stq %1,%0";
20191 else
20192 return "#";
20195 else if (TARGET_ALTIVEC && src_vmx_p
20196 && altivec_indexed_or_indirect_operand (dest, mode))
20197 return "stvx %1,%y0";
20199 else if (TARGET_VSX && src_vsx_p)
20201 if (mode_supports_dq_form (mode)
20202 && quad_address_p (XEXP (dest, 0), mode, true))
20203 return "stxv %x1,%0";
20205 else if (TARGET_P9_VECTOR)
20206 return "stxvx %x1,%y0";
20208 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20209 return "stxvw4x %x1,%y0";
20211 else
20212 return "stxvd2x %x1,%y0";
20215 else if (TARGET_ALTIVEC && src_vmx_p)
20216 return "stvx %1,%y0";
20218 else if (src_fp_p)
20219 return "#";
20222 /* Constants. */
20223 else if (dest_regno >= 0
20224 && (CONST_INT_P (src)
20225 || CONST_WIDE_INT_P (src)
20226 || CONST_DOUBLE_P (src)
20227 || GET_CODE (src) == CONST_VECTOR))
20229 if (dest_gpr_p)
20230 return "#";
20232 else if ((dest_vmx_p && TARGET_ALTIVEC)
20233 || (dest_vsx_p && TARGET_VSX))
20234 return output_vec_const_move (operands);
20237 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
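/* Quick reference for the returns above (illustrative summary): register
   moves use "xxlor" (VSX) or "vor" (Altivec); "lq"/"stq" need
   TARGET_QUAD_MEMORY; VSX memory accesses pick "lxv"/"stxv" for DQ-form
   addresses, "lxvx"/"stxvx" on ISA 3.0, "lxvw4x"/"stxvw4x" for 4-byte
   element vectors, else "lxvd2x"/"stxvd2x"; and "#" marks moves that are
   split into smaller pieces after reload.  */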
20240 /* Validate a 128-bit move. */
20241 bool
20242 rs6000_move_128bit_ok_p (rtx operands[])
20244 machine_mode mode = GET_MODE (operands[0]);
20245 return (gpc_reg_operand (operands[0], mode)
20246 || gpc_reg_operand (operands[1], mode));
20249 /* Return true if a 128-bit move needs to be split. */
20250 bool
20251 rs6000_split_128bit_ok_p (rtx operands[])
20253 if (!reload_completed)
20254 return false;
20256 if (!gpr_or_gpr_p (operands[0], operands[1]))
20257 return false;
20259 if (quad_load_store_p (operands[0], operands[1]))
20260 return false;
20262 return true;
20266 /* Given a comparison operation, return the bit number in CCR to test. We
20267 know this is a valid comparison.
20269 SCC_P is 1 if this is for an scc. That means that %D will have been
20270 used instead of %C, so the bits will be in different places.
20272 Return -1 if OP isn't a valid comparison for some reason. */
20274 int
20275 ccr_bit (rtx op, int scc_p)
20277 enum rtx_code code = GET_CODE (op);
20278 machine_mode cc_mode;
20279 int cc_regnum;
20280 int base_bit;
20281 rtx reg;
20283 if (!COMPARISON_P (op))
20284 return -1;
20286 reg = XEXP (op, 0);
20288 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20289 return -1;
20291 cc_mode = GET_MODE (reg);
20292 cc_regnum = REGNO (reg);
20293 base_bit = 4 * (cc_regnum - CR0_REGNO);
20295 validate_condition_mode (code, cc_mode);
20297 /* When generating a sCOND operation, only positive conditions are
20298 allowed. */
20299 if (scc_p)
20300 switch (code)
20302 case EQ:
20303 case GT:
20304 case LT:
20305 case UNORDERED:
20306 case GTU:
20307 case LTU:
20308 break;
20309 default:
20310 return -1;
20313 switch (code)
20315 case NE:
20316 return scc_p ? base_bit + 3 : base_bit + 2;
20317 case EQ:
20318 return base_bit + 2;
20319 case GT: case GTU: case UNLE:
20320 return base_bit + 1;
20321 case LT: case LTU: case UNGE:
20322 return base_bit;
20323 case ORDERED: case UNORDERED:
20324 return base_bit + 3;
20326 case GE: case GEU:
20327 /* If scc, we will have done a cror to put the bit in the
20328 unordered position. So test that bit. For integer, this is ! LT
20329 unless this is an scc insn. */
20330 return scc_p ? base_bit + 3 : base_bit;
20332 case LE: case LEU:
20333 return scc_p ? base_bit + 3 : base_bit + 1;
20335 default:
20336 return -1;
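/* Aside (illustrative, not part of the original file): each CR field holds
   four bits -- LT, GT, EQ, SO/UN -- so CRn starts at bit 4*n and, for
   example, an EQ test on CR3 reads bit 4*3 + 2 = 14.  A self-contained
   sketch of the arithmetic used by ccr_bit, compiled out with #if 0:  */
#if 0
/* Bit number of the EQ bit of condition-register field CR_FIELD
   (0..7 for CR0..CR7).  LT = +0, GT = +1, EQ = +2, SO/UN = +3.  */
static int
cr_field_eq_bit (int cr_field)
{
  int base_bit = 4 * cr_field;
  return base_bit + 2;
}
#endif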
20340 /* Return the GOT register. */
20342 rtx
20343 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20345 /* The second flow pass currently (June 1999) can't update
20346 regs_ever_live without disturbing other parts of the compiler, so
20347 update it here to make the prolog/epilogue code happy. */
20348 if (!can_create_pseudo_p ()
20349 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20350 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20352 crtl->uses_pic_offset_table = 1;
20354 return pic_offset_table_rtx;
20357 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20359 /* Write out a function code label. */
20361 void
20362 rs6000_output_function_entry (FILE *file, const char *fname)
20364 if (fname[0] != '.')
20366 switch (DEFAULT_ABI)
20368 default:
20369 gcc_unreachable ();
20371 case ABI_AIX:
20372 if (DOT_SYMBOLS)
20373 putc ('.', file);
20374 else
20375 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20376 break;
20378 case ABI_ELFv2:
20379 case ABI_V4:
20380 case ABI_DARWIN:
20381 break;
20385 RS6000_OUTPUT_BASENAME (file, fname);
20388 /* Print an operand. Recognize special options, documented below. */
20390 #if TARGET_ELF
20391 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20392 only introduced by the linker, when applying the sda21
20393 relocation. */
20394 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20395 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20396 #else
20397 #define SMALL_DATA_RELOC "sda21"
20398 #define SMALL_DATA_REG 0
20399 #endif
20401 void
20402 print_operand (FILE *file, rtx x, int code)
20404 int i;
20405 unsigned HOST_WIDE_INT uval;
20407 switch (code)
20409 /* %a is output_address. */
20411 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20412 output_operand. */
20414 case 'D':
20415 /* Like 'J' but get to the GT bit only. */
20416 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20418 output_operand_lossage ("invalid %%D value");
20419 return;
20422 /* Bit 1 is GT bit. */
20423 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20425 /* Add one for shift count in rlinm for scc. */
20426 fprintf (file, "%d", i + 1);
20427 return;
20429 case 'e':
20430 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20431 if (! INT_P (x))
20433 output_operand_lossage ("invalid %%e value");
20434 return;
20437 uval = INTVAL (x);
20438 if ((uval & 0xffff) == 0 && uval != 0)
20439 putc ('s', file);
20440 return;
20442 case 'E':
20443 /* X is a CR register. Print the number of the EQ bit of the CR. */
20444 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20445 output_operand_lossage ("invalid %%E value");
20446 else
20447 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20448 return;
20450 case 'f':
20451 /* X is a CR register. Print the shift count needed to move it
20452 to the high-order four bits. */
20453 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20454 output_operand_lossage ("invalid %%f value");
20455 else
20456 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20457 return;
20459 case 'F':
20460 /* Similar, but print the count for the rotate in the opposite
20461 direction. */
20462 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20463 output_operand_lossage ("invalid %%F value");
20464 else
20465 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20466 return;
20468 case 'G':
20469 /* X is a constant integer. If it is negative, print "m",
20470 otherwise print "z". This is to make an aze or ame insn. */
20471 if (!CONST_INT_P (x))
20472 output_operand_lossage ("invalid %%G value");
20473 else if (INTVAL (x) >= 0)
20474 putc ('z', file);
20475 else
20476 putc ('m', file);
20477 return;
20479 case 'h':
20480 /* If constant, output low-order five bits. Otherwise, write
20481 normally. */
20482 if (INT_P (x))
20483 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20484 else
20485 print_operand (file, x, 0);
20486 return;
20488 case 'H':
20489 /* If constant, output low-order six bits. Otherwise, write
20490 normally. */
20491 if (INT_P (x))
20492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20493 else
20494 print_operand (file, x, 0);
20495 return;
20497 case 'I':
20498 /* Print `i' if this is a constant, else nothing. */
20499 if (INT_P (x))
20500 putc ('i', file);
20501 return;
20503 case 'j':
20504 /* Write the bit number in CCR for jump. */
20505 i = ccr_bit (x, 0);
20506 if (i == -1)
20507 output_operand_lossage ("invalid %%j code");
20508 else
20509 fprintf (file, "%d", i);
20510 return;
20512 case 'J':
20513 /* Similar, but add one for shift count in rlinm for scc and pass
20514 scc flag to `ccr_bit'. */
20515 i = ccr_bit (x, 1);
20516 if (i == -1)
20517 output_operand_lossage ("invalid %%J code");
20518 else
20519 /* If we want bit 31, write a shift count of zero, not 32. */
20520 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20521 return;
20523 case 'k':
20524 /* X must be a constant. Write the 1's complement of the
20525 constant. */
20526 if (! INT_P (x))
20527 output_operand_lossage ("invalid %%k value");
20528 else
20529 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20530 return;
20532 case 'K':
20533 /* X must be a symbolic constant on ELF. Write an
20534 expression suitable for an 'addi' that adds in the low 16
20535 bits of the MEM. */
20536 if (GET_CODE (x) == CONST)
20538 if (GET_CODE (XEXP (x, 0)) != PLUS
20539 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20540 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20541 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20542 output_operand_lossage ("invalid %%K value");
20544 print_operand_address (file, x);
20545 fputs ("@l", file);
20546 return;
20548 /* %l is output_asm_label. */
20550 case 'L':
20551 /* Write second word of DImode or DFmode reference. Works on register
20552 or non-indexed memory only. */
20553 if (REG_P (x))
20554 fputs (reg_names[REGNO (x) + 1], file);
20555 else if (MEM_P (x))
20557 machine_mode mode = GET_MODE (x);
20558 /* Handle possible auto-increment. Since it is pre-increment and
20559 we have already done it, we can just use an offset of one word. */
20560 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20561 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20562 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20563 UNITS_PER_WORD));
20564 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20565 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20566 UNITS_PER_WORD));
20567 else
20568 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20569 UNITS_PER_WORD),
20570 0));
20572 if (small_data_operand (x, GET_MODE (x)))
20573 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20574 reg_names[SMALL_DATA_REG]);
20576 return;
20578 case 'N': /* Unused */
20579 /* Write the number of elements in the vector times 4. */
20580 if (GET_CODE (x) != PARALLEL)
20581 output_operand_lossage ("invalid %%N value");
20582 else
20583 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20584 return;
20586 case 'O': /* Unused */
20587 /* Similar, but subtract 1 first. */
20588 if (GET_CODE (x) != PARALLEL)
20589 output_operand_lossage ("invalid %%O value");
20590 else
20591 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20592 return;
20594 case 'p':
20595 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20596 if (! INT_P (x)
20597 || INTVAL (x) < 0
20598 || (i = exact_log2 (INTVAL (x))) < 0)
20599 output_operand_lossage ("invalid %%p value");
20600 else
20601 fprintf (file, "%d", i);
20602 return;
20604 case 'P':
20605 /* The operand must be an indirect memory reference. The result
20606 is the register name. */
20607 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20608 || REGNO (XEXP (x, 0)) >= 32)
20609 output_operand_lossage ("invalid %%P value");
20610 else
20611 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20612 return;
20614 case 'q':
20615 /* This outputs the logical code corresponding to a boolean
20616 expression. The expression may have one or both operands
20617 negated (if one, only the first one). For condition register
20618 logical operations, it will also treat the negated
20619 CR codes as NOTs, but not handle NOTs of them. */
20621 const char *const *t = 0;
20622 const char *s;
20623 enum rtx_code code = GET_CODE (x);
20624 static const char * const tbl[3][3] = {
20625 { "and", "andc", "nor" },
20626 { "or", "orc", "nand" },
20627 { "xor", "eqv", "xor" } };
20629 if (code == AND)
20630 t = tbl[0];
20631 else if (code == IOR)
20632 t = tbl[1];
20633 else if (code == XOR)
20634 t = tbl[2];
20635 else
20636 output_operand_lossage ("invalid %%q value");
20638 if (GET_CODE (XEXP (x, 0)) != NOT)
20639 s = t[0];
20640 else
20642 if (GET_CODE (XEXP (x, 1)) == NOT)
20643 s = t[2];
20644 else
20645 s = t[1];
20648 fputs (s, file);
20650 return;
20652 case 'Q':
20653 if (! TARGET_MFCRF)
20654 return;
20655 fputc (',', file);
20656 /* FALLTHRU */
20658 case 'R':
20659 /* X is a CR register. Print the mask for `mtcrf'. */
20660 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20661 output_operand_lossage ("invalid %%R value");
20662 else
20663 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20664 return;
20666 case 's':
20667 /* Low 5 bits of 32 - value. */
20668 if (! INT_P (x))
20669 output_operand_lossage ("invalid %%s value");
20670 else
20671 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20672 return;
20674 case 't':
20675 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20676 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20678 output_operand_lossage ("invalid %%t value");
20679 return;
20682 /* Bit 3 is OV bit. */
20683 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20685 /* If we want bit 31, write a shift count of zero, not 32. */
20686 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20687 return;
20689 case 'T':
20690 /* Print the symbolic name of a branch target register. */
20691 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20692 x = XVECEXP (x, 0, 0);
20693 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20694 && REGNO (x) != CTR_REGNO))
20695 output_operand_lossage ("invalid %%T value");
20696 else if (REGNO (x) == LR_REGNO)
20697 fputs ("lr", file);
20698 else
20699 fputs ("ctr", file);
20700 return;
20702 case 'u':
20703 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20704 for use in unsigned operand. */
20705 if (! INT_P (x))
20707 output_operand_lossage ("invalid %%u value");
20708 return;
20711 uval = INTVAL (x);
20712 if ((uval & 0xffff) == 0)
20713 uval >>= 16;
20715 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20716 return;
20718 case 'v':
20719 /* High-order 16 bits of constant for use in signed operand. */
20720 if (! INT_P (x))
20721 output_operand_lossage ("invalid %%v value");
20722 else
20723 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20724 (INTVAL (x) >> 16) & 0xffff);
20725 return;
20727 case 'U':
20728 /* Print `u' if this has an auto-increment or auto-decrement. */
20729 if (MEM_P (x)
20730 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20731 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20732 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20733 putc ('u', file);
20734 return;
20736 case 'V':
20737 /* Print the trap code for this operand. */
20738 switch (GET_CODE (x))
20740 case EQ:
20741 fputs ("eq", file); /* 4 */
20742 break;
20743 case NE:
20744 fputs ("ne", file); /* 24 */
20745 break;
20746 case LT:
20747 fputs ("lt", file); /* 16 */
20748 break;
20749 case LE:
20750 fputs ("le", file); /* 20 */
20751 break;
20752 case GT:
20753 fputs ("gt", file); /* 8 */
20754 break;
20755 case GE:
20756 fputs ("ge", file); /* 12 */
20757 break;
20758 case LTU:
20759 fputs ("llt", file); /* 2 */
20760 break;
20761 case LEU:
20762 fputs ("lle", file); /* 6 */
20763 break;
20764 case GTU:
20765 fputs ("lgt", file); /* 1 */
20766 break;
20767 case GEU:
20768 fputs ("lge", file); /* 5 */
20769 break;
20770 default:
20771 output_operand_lossage ("invalid %%V value");
20773 break;
20775 case 'w':
20776 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20777 normally. */
20778 if (INT_P (x))
20779 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20780 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20781 else
20782 print_operand (file, x, 0);
20783 return;
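/* Worked example for the 'w' arithmetic above: ((x & 0xffff) ^ 0x8000)
   - 0x8000 sign-extends the low halfword. For x = 0x12345678 the low 16
   bits are 0x5678; XOR gives 0xd678 and subtracting 0x8000 returns
   0x5678, still positive. For low bits 0x8000 the XOR yields 0 and the
   subtraction gives -32768, the most negative signed 16-bit operand. */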
20785 case 'x':
20786 /* X is an FPR or Altivec register used in a VSX context. */
20787 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20788 output_operand_lossage ("invalid %%x value");
20789 else
20791 int reg = REGNO (x);
20792 int vsx_reg = (FP_REGNO_P (reg)
20793 ? reg - 32
20794 : reg - FIRST_ALTIVEC_REGNO + 32);
20796 #ifdef TARGET_REGNAMES
20797 if (TARGET_REGNAMES)
20798 fprintf (file, "%%vs%d", vsx_reg);
20799 else
20800 #endif
20801 fprintf (file, "%d", vsx_reg);
20803 return;
20805 case 'X':
20806 if (MEM_P (x)
20807 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20808 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20809 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20810 putc ('x', file);
20811 return;
20813 case 'Y':
20814 /* Like 'L', for third word of TImode/PTImode. */
20815 if (REG_P (x))
20816 fputs (reg_names[REGNO (x) + 2], file);
20817 else if (MEM_P (x))
20819 machine_mode mode = GET_MODE (x);
20820 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20821 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20822 output_address (mode, plus_constant (Pmode,
20823 XEXP (XEXP (x, 0), 0), 8));
20824 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20825 output_address (mode, plus_constant (Pmode,
20826 XEXP (XEXP (x, 0), 0), 8));
20827 else
20828 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20829 if (small_data_operand (x, GET_MODE (x)))
20830 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20831 reg_names[SMALL_DATA_REG]);
20833 return;
20835 case 'z':
20836 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20837 x = XVECEXP (x, 0, 1);
20838 /* X is a SYMBOL_REF. Write out the name preceded by a
20839 period and without any trailing data in brackets. Used for function
20840 names. If we are configured for System V (or the embedded ABI) on
20841 the PowerPC, do not emit the period, since those systems do not use
20842 TOCs and the like. */
20843 if (!SYMBOL_REF_P (x))
20845 output_operand_lossage ("invalid %%z value");
20846 return;
20849 /* For macho, check to see if we need a stub. */
20850 if (TARGET_MACHO)
20852 const char *name = XSTR (x, 0);
20853 #if TARGET_MACHO
20854 if (darwin_picsymbol_stubs
20855 && MACHOPIC_INDIRECT
20856 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20857 name = machopic_indirection_name (x, /*stub_p=*/true);
20858 #endif
20859 assemble_name (file, name);
20861 else if (!DOT_SYMBOLS)
20862 assemble_name (file, XSTR (x, 0));
20863 else
20864 rs6000_output_function_entry (file, XSTR (x, 0));
20865 return;
20867 case 'Z':
20868 /* Like 'L', for last word of TImode/PTImode. */
20869 if (REG_P (x))
20870 fputs (reg_names[REGNO (x) + 3], file);
20871 else if (MEM_P (x))
20873 machine_mode mode = GET_MODE (x);
20874 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20875 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20876 output_address (mode, plus_constant (Pmode,
20877 XEXP (XEXP (x, 0), 0), 12));
20878 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20879 output_address (mode, plus_constant (Pmode,
20880 XEXP (XEXP (x, 0), 0), 12));
20881 else
20882 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20883 if (small_data_operand (x, GET_MODE (x)))
20884 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20885 reg_names[SMALL_DATA_REG]);
20887 return;
20889 /* Print AltiVec memory operand. */
20890 case 'y':
20892 rtx tmp;
20894 gcc_assert (MEM_P (x));
20896 tmp = XEXP (x, 0);
20898 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20899 && GET_CODE (tmp) == AND
20900 && CONST_INT_P (XEXP (tmp, 1))
20901 && INTVAL (XEXP (tmp, 1)) == -16)
20902 tmp = XEXP (tmp, 0);
20903 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20904 && GET_CODE (tmp) == PRE_MODIFY)
20905 tmp = XEXP (tmp, 1);
20906 if (REG_P (tmp))
20907 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20908 else
20910 if (GET_CODE (tmp) != PLUS
20911 || !REG_P (XEXP (tmp, 0))
20912 || !REG_P (XEXP (tmp, 1)))
20914 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
20915 break;
20918 if (REGNO (XEXP (tmp, 0)) == 0)
20919 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
20920 reg_names[ REGNO (XEXP (tmp, 0)) ]);
20921 else
20922 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
20923 reg_names[ REGNO (XEXP (tmp, 1)) ]);
20925 break;
20928 case 0:
20929 if (REG_P (x))
20930 fprintf (file, "%s", reg_names[REGNO (x)]);
20931 else if (MEM_P (x))
20933 /* We need to handle PRE_INC and PRE_DEC here, since we need to
20934 know the width from the mode. */
20935 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
20936 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
20937 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20938 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
20939 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
20940 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20941 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20942 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
20943 else
20944 output_address (GET_MODE (x), XEXP (x, 0));
20946 else if (toc_relative_expr_p (x, false,
20947 &tocrel_base_oac, &tocrel_offset_oac))
20948 /* This hack along with a corresponding hack in
20949 rs6000_output_addr_const_extra arranges to output addends
20950 where the assembler expects to find them. eg.
20951 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
20952 without this hack would be output as "x@toc+4". We
20953 want "x+4@toc". */
20954 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
20955 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
20956 output_addr_const (file, XVECEXP (x, 0, 0));
20957 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20958 output_addr_const (file, XVECEXP (x, 0, 1));
20959 else
20960 output_addr_const (file, x);
20961 return;
20963 case '&':
20964 if (const char *name = get_some_local_dynamic_name ())
20965 assemble_name (file, name);
20966 else
20967 output_operand_lossage ("'%%&' used without any "
20968 "local dynamic TLS references");
20969 return;
20971 default:
20972 output_operand_lossage ("invalid %%xn code");
20976 /* Print the address of an operand. */
20978 void
20979 print_operand_address (FILE *file, rtx x)
20981 if (REG_P (x))
20982 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
20984 /* Is it a pc-relative address? */
20985 else if (pcrel_address (x, Pmode))
20987 HOST_WIDE_INT offset;
20989 if (GET_CODE (x) == CONST)
20990 x = XEXP (x, 0);
20992 if (GET_CODE (x) == PLUS)
20994 offset = INTVAL (XEXP (x, 1));
20995 x = XEXP (x, 0);
20997 else
20998 offset = 0;
21000 output_addr_const (file, x);
21002 if (offset)
21003 fprintf (file, "%+" PRId64, offset);
21005 fputs ("@pcrel", file);
21007 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21008 || GET_CODE (x) == LABEL_REF)
21010 output_addr_const (file, x);
21011 if (small_data_operand (x, GET_MODE (x)))
21012 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21013 reg_names[SMALL_DATA_REG]);
21014 else
21015 gcc_assert (!TARGET_TOC);
21017 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21018 && REG_P (XEXP (x, 1)))
21020 if (REGNO (XEXP (x, 0)) == 0)
21021 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21022 reg_names[ REGNO (XEXP (x, 0)) ]);
21023 else
21024 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21025 reg_names[ REGNO (XEXP (x, 1)) ]);
21027 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21028 && CONST_INT_P (XEXP (x, 1)))
21029 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21030 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21031 #if TARGET_MACHO
21032 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21033 && CONSTANT_P (XEXP (x, 1)))
21035 fprintf (file, "lo16(");
21036 output_addr_const (file, XEXP (x, 1));
21037 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21039 #endif
21040 #if TARGET_ELF
21041 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21042 && CONSTANT_P (XEXP (x, 1)))
21044 output_addr_const (file, XEXP (x, 1));
21045 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21047 #endif
21048 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21050 /* This hack along with a corresponding hack in
21051 rs6000_output_addr_const_extra arranges to output addends
21052 where the assembler expects to find them. eg.
21053 (lo_sum (reg 9)
21054 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21055 without this hack would be output as "x@toc+8@l(9)". We
21056 want "x+8@toc@l(9)". */
21057 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21058 if (GET_CODE (x) == LO_SUM)
21059 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21060 else
21061 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21063 else
21064 output_addr_const (file, x);
21067 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21069 static bool
21070 rs6000_output_addr_const_extra (FILE *file, rtx x)
21072 if (GET_CODE (x) == UNSPEC)
21073 switch (XINT (x, 1))
21075 case UNSPEC_TOCREL:
21076 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21077 && REG_P (XVECEXP (x, 0, 1))
21078 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21079 output_addr_const (file, XVECEXP (x, 0, 0));
21080 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21082 if (INTVAL (tocrel_offset_oac) >= 0)
21083 fprintf (file, "+");
21084 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21086 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21088 putc ('-', file);
21089 assemble_name (file, toc_label_name);
21090 need_toc_init = 1;
21092 else if (TARGET_ELF)
21093 fputs ("@toc", file);
21094 return true;
21096 #if TARGET_MACHO
21097 case UNSPEC_MACHOPIC_OFFSET:
21098 output_addr_const (file, XVECEXP (x, 0, 0));
21099 putc ('-', file);
21100 machopic_output_function_base_name (file);
21101 return true;
21102 #endif
21104 return false;
21107 /* Target hook for assembling integer objects. The PowerPC version has
21108 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21109 is defined. It also needs to handle DI-mode objects on 64-bit
21110 targets. */
21112 static bool
21113 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21115 #ifdef RELOCATABLE_NEEDS_FIXUP
21116 /* Special handling for SI values. */
21117 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21119 static int recurse = 0;
21121 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21122 the .fixup section. Since the TOC section is already relocated, we
21123 don't need to mark it here. We used to skip the text section, but it
21124 should never be valid for relocated addresses to be placed in the text
21125 section. */
21126 if (DEFAULT_ABI == ABI_V4
21127 && (TARGET_RELOCATABLE || flag_pic > 1)
21128 && in_section != toc_section
21129 && !recurse
21130 && !CONST_SCALAR_INT_P (x)
21131 && CONSTANT_P (x))
21133 char buf[256];
21135 recurse = 1;
21136 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21137 fixuplabelno++;
21138 ASM_OUTPUT_LABEL (asm_out_file, buf);
21139 fprintf (asm_out_file, "\t.long\t(");
21140 output_addr_const (asm_out_file, x);
21141 fprintf (asm_out_file, ")@fixup\n");
21142 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21143 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21144 fprintf (asm_out_file, "\t.long\t");
21145 assemble_name (asm_out_file, buf);
21146 fprintf (asm_out_file, "\n\t.previous\n");
21147 recurse = 0;
21148 return true;
21150 /* Remove initial .'s to turn a -mcall-aixdesc function
21151 address into the address of the descriptor, not the function
21152 itself. */
21153 else if (SYMBOL_REF_P (x)
21154 && XSTR (x, 0)[0] == '.'
21155 && DEFAULT_ABI == ABI_AIX)
21157 const char *name = XSTR (x, 0);
21158 while (*name == '.')
21159 name++;
21161 fprintf (asm_out_file, "\t.long\t%s\n", name);
21162 return true;
21165 #endif /* RELOCATABLE_NEEDS_FIXUP */
21166 return default_assemble_integer (x, size, aligned_p);
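/* For reference, the -mrelocatable path above emits assembly of this shape
   (illustrative, assuming the first fixup label is numbered 0):

	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous

   i.e. the relocated word itself plus a .fixup entry pointing back at it. */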
21169 /* Return a template string for assembly to emit when making an
21170 external call. FUNOP is the call mem argument operand number. */
21172 static const char *
21173 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21175 /* -Wformat-overflow workaround, without which gcc thinks that %u
21176 might produce 10 digits. */
21177 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21179 char arg[12];
21180 arg[0] = 0;
21181 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21183 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21184 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21185 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21186 sprintf (arg, "(%%&@tlsld)");
21187 else
21188 gcc_unreachable ();
21191 /* The magic 32768 offset here corresponds to the offset of
21192 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21193 char z[11];
21194 sprintf (z, "%%z%u%s", funop,
21195 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21196 ? "+32768" : ""));
21198 static char str[32]; /* 1 spare */
21199 if (rs6000_pcrel_p (cfun))
21200 sprintf (str, "b%s %s@notoc%s", sibcall ? "" : "l", z, arg);
21201 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21202 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21203 sibcall ? "" : "\n\tnop");
21204 else if (DEFAULT_ABI == ABI_V4)
21205 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21206 flag_pic ? "@plt" : "");
21207 #if TARGET_MACHO
21208 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21209 else if (DEFAULT_ABI == ABI_DARWIN)
21211 /* The cookie is in operand func+2. */
21212 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21213 int cookie = INTVAL (operands[funop + 2]);
21214 if (cookie & CALL_LONG)
21216 tree funname = get_identifier (XSTR (operands[funop], 0));
21217 tree labelname = get_prev_label (funname);
21218 gcc_checking_assert (labelname && !sibcall);
21220 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21221 instruction will reach 'foo', otherwise link as 'bl L42'".
21222 "L42" should be a 'branch island', that will do a far jump to
21223 'foo'. Branch islands are generated in
21224 macho_branch_islands(). */
21225 sprintf (str, "jbsr %%z%u,%.10s", funop,
21226 IDENTIFIER_POINTER (labelname));
21228 else
21229 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21230 after the call. */
21231 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21233 #endif
21234 else
21235 gcc_unreachable ();
21236 return str;
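/* Examples of the templates built above (illustrative; the %z operand
   number depends on the insn): a direct call is "bl %z1\n\tnop" on
   AIX/ELFv2, "bl %z1@notoc" when pc-relative, and "bl %z1@plt" for ABI_V4
   with -fpic; sibcalls use "b" instead of "bl" and omit the trailing
   nop. */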
21239 const char *
21240 rs6000_call_template (rtx *operands, unsigned int funop)
21242 return rs6000_call_template_1 (operands, funop, false);
21245 const char *
21246 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21248 return rs6000_call_template_1 (operands, funop, true);
21251 /* As above, for indirect calls. */
21253 static const char *
21254 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21255 bool sibcall)
21257 /* -Wformat-overflow workaround, without which gcc thinks that %u
21258 might produce 10 digits. Note that -Wformat-overflow will not
21259 currently warn here for str[], so do not rely on a warning to
21260 ensure str[] is correctly sized. */
21261 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21263 /* Currently, funop is either 0 or 1. The maximum string is always
21264 a !speculate 64-bit __tls_get_addr call.
21266 ABI_ELFv2, pcrel:
21267 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21268 . 35 .reloc .,R_PPC64_PLTSEQ_NOTOC,%z1\n\t
21269 . 9 crset 2\n\t
21270 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21271 . 36 .reloc .,R_PPC64_PLTCALL_NOTOC,%z1\n\t
21272 . 8 beq%T1l-
21273 .---
21274 .142
21276 ABI_AIX:
21277 . 9 ld 2,%3\n\t
21278 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21279 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21280 . 9 crset 2\n\t
21281 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21282 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21283 . 10 beq%T1l-\n\t
21284 . 10 ld 2,%4(1)
21285 .---
21286 .151
21288 ABI_ELFv2:
21289 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21290 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21291 . 9 crset 2\n\t
21292 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21293 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21294 . 10 beq%T1l-\n\t
21295 . 10 ld 2,%3(1)
21296 .---
21297 .142
21299 ABI_V4:
21300 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21301 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21302 . 9 crset 2\n\t
21303 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21304 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21305 . 8 beq%T1l-
21306 .---
21307 .141 */
21308 static char str[160]; /* 8 spare */
21309 char *s = str;
21310 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21312 if (DEFAULT_ABI == ABI_AIX)
21313 s += sprintf (s,
21314 "l%s 2,%%%u\n\t",
21315 ptrload, funop + 2);
21317 /* We don't need the extra code to stop indirect call speculation if
21318 calling via LR. */
21319 bool speculate = (TARGET_MACHO
21320 || rs6000_speculate_indirect_jumps
21321 || (REG_P (operands[funop])
21322 && REGNO (operands[funop]) == LR_REGNO));
21324 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21326 const char *rel64 = TARGET_64BIT ? "64" : "";
21327 char tls[29];
21328 tls[0] = 0;
21329 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21331 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21332 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21333 rel64, funop + 1);
21334 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21335 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21336 rel64);
21337 else
21338 gcc_unreachable ();
21341 const char *notoc = rs6000_pcrel_p (cfun) ? "_NOTOC" : "";
21342 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21343 && flag_pic == 2 ? "+32768" : "");
21344 if (!speculate)
21346 s += sprintf (s,
21347 "%s.reloc .,R_PPC%s_PLTSEQ%s,%%z%u%s\n\t",
21348 tls, rel64, notoc, funop, addend);
21349 s += sprintf (s, "crset 2\n\t");
21351 s += sprintf (s,
21352 "%s.reloc .,R_PPC%s_PLTCALL%s,%%z%u%s\n\t",
21353 tls, rel64, notoc, funop, addend);
21355 else if (!speculate)
21356 s += sprintf (s, "crset 2\n\t");
21358 if (rs6000_pcrel_p (cfun))
21360 if (speculate)
21361 sprintf (s, "b%%T%ul", funop);
21362 else
21363 sprintf (s, "beq%%T%ul-", funop);
21365 else if (DEFAULT_ABI == ABI_AIX)
21367 if (speculate)
21368 sprintf (s,
21369 "b%%T%ul\n\t"
21370 "l%s 2,%%%u(1)",
21371 funop, ptrload, funop + 3);
21372 else
21373 sprintf (s,
21374 "beq%%T%ul-\n\t"
21375 "l%s 2,%%%u(1)",
21376 funop, ptrload, funop + 3);
21378 else if (DEFAULT_ABI == ABI_ELFv2)
21380 if (speculate)
21381 sprintf (s,
21382 "b%%T%ul\n\t"
21383 "l%s 2,%%%u(1)",
21384 funop, ptrload, funop + 2);
21385 else
21386 sprintf (s,
21387 "beq%%T%ul-\n\t"
21388 "l%s 2,%%%u(1)",
21389 funop, ptrload, funop + 2);
21391 else
21393 if (speculate)
21394 sprintf (s,
21395 "b%%T%u%s",
21396 funop, sibcall ? "" : "l");
21397 else
21398 sprintf (s,
21399 "beq%%T%u%s-%s",
21400 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21402 return str;
21405 const char *
21406 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21408 return rs6000_indirect_call_template_1 (operands, funop, false);
21411 const char *
21412 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21414 return rs6000_indirect_call_template_1 (operands, funop, true);
21417 #if HAVE_AS_PLTSEQ
21418 /* Output indirect call insns. WHICH identifies the type of sequence. */
21419 const char *
21420 rs6000_pltseq_template (rtx *operands, int which)
21422 const char *rel64 = TARGET_64BIT ? "64" : "";
21423 char tls[30];
21424 tls[0] = 0;
21425 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21427 char off = which == RS6000_PLTSEQ_PLT_PCREL34 ? '8' : '4';
21428 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21429 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSGD,%%3\n\t",
21430 off, rel64);
21431 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21432 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSLD,%%&\n\t",
21433 off, rel64);
21434 else
21435 gcc_unreachable ();
21438 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21439 static char str[96]; /* 10 spare */
21440 char off = WORDS_BIG_ENDIAN ? '2' : '4';
21441 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21442 && flag_pic == 2 ? "+32768" : "");
21443 switch (which)
21445 case RS6000_PLTSEQ_TOCSAVE:
21446 sprintf (str,
21447 "st%s\n\t"
21448 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2",
21449 TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)",
21450 tls, rel64);
21451 break;
21452 case RS6000_PLTSEQ_PLT16_HA:
21453 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21454 sprintf (str,
21455 "lis %%0,0\n\t"
21456 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2",
21457 tls, off, rel64);
21458 else
21459 sprintf (str,
21460 "addis %%0,%%1,0\n\t"
21461 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2%s",
21462 tls, off, rel64, addend);
21463 break;
21464 case RS6000_PLTSEQ_PLT16_LO:
21465 sprintf (str,
21466 "l%s %%0,0(%%1)\n\t"
21467 "%s.reloc .-%c,R_PPC%s_PLT16_LO%s,%%z2%s",
21468 TARGET_64BIT ? "d" : "wz",
21469 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend);
21470 break;
21471 case RS6000_PLTSEQ_MTCTR:
21472 sprintf (str,
21473 "mtctr %%1\n\t"
21474 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2%s",
21475 tls, rel64, addend);
21476 break;
21477 case RS6000_PLTSEQ_PLT_PCREL34:
21478 sprintf (str,
21479 "pl%s %%0,0(0),1\n\t"
21480 "%s.reloc .-8,R_PPC%s_PLT_PCREL34_NOTOC,%%z2",
21481 TARGET_64BIT ? "d" : "wz",
21482 tls, rel64);
21483 break;
21484 default:
21485 gcc_unreachable ();
21487 return str;
21489 #endif
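/* Illustration, added editorially; a sketch of typical output, not taken
   from the original sources.  On a little-endian ELFv2 target with a
   PLTSEQ-capable assembler, the templates above expand an inline PLT
   call into a relocation-marked sequence along these lines (symbol name
   and register numbers hypothetical):

	addis 9,2,0
		.reloc .-4,R_PPC64_PLT16_HA,foo
	ld 9,0(9)
		.reloc .-4,R_PPC64_PLT16_LO_DS,foo
	mtctr 9
		.reloc .-4,R_PPC64_PLTSEQ,foo
	bctrl

   The .reloc markers let the linker identify the whole sequence and
   relax it when the callee turns out to be local.  */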
21491 /* Helper function to return whether a MODE can do prefixed loads/stores.
21492 VOIDmode is used when we are loading the pc-relative address into a base
21493 register, but we are not using it as part of a memory operation. As modes
21494 add support for prefixed memory, they will be added here. */
21496 static bool
21497 mode_supports_prefixed_address_p (machine_mode mode)
21499 return mode == VOIDmode;
21502 /* Function to return true if ADDR is a valid prefixed memory address that uses
21503 mode MODE. */
21505 bool
21506 rs6000_prefixed_address (rtx addr, machine_mode mode)
21508 if (!TARGET_PREFIXED_ADDR || !mode_supports_prefixed_address_p (mode))
21509 return false;
21511 /* Check for PC-relative addresses. */
21512 if (pcrel_address (addr, Pmode))
21513 return true;
21515 /* Check for prefixed memory addresses that have a large numeric offset,
21516 or an offset that can't be used for a DS/DQ-form memory operation. */
21517 if (GET_CODE (addr) == PLUS)
21519 rtx op0 = XEXP (addr, 0);
21520 rtx op1 = XEXP (addr, 1);
21522 if (!base_reg_operand (op0, Pmode) || !CONST_INT_P (op1))
21523 return false;
21525 HOST_WIDE_INT value = INTVAL (op1);
21526 if (!SIGNED_34BIT_OFFSET_P (value))
21527 return false;
21529 /* Offset larger than 16 bits? */
21530 if (!SIGNED_16BIT_OFFSET_P (value))
21531 return true;
21533 /* DQ instruction (bottom 4 bits must be 0) for vectors. */
21534 HOST_WIDE_INT mask;
21535 if (GET_MODE_SIZE (mode) >= 16)
21536 mask = 15;
21538 /* DS instruction (bottom 2 bits must be 0). For 32-bit integers, we
21539 need to use DS instructions if we are sign-extending the value with
21540 LWA. For 32-bit floating point, we need DS instructions to load and
21541 store values to the traditional Altivec registers. */
21542 else if (GET_MODE_SIZE (mode) >= 4)
21543 mask = 3;
21545 /* QImode/HImode has no restrictions. */
21546 else
21547 return true;
21549 /* Return true if we must use a prefixed instruction. */
21550 return (value & mask) != 0;
21553 return false;
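/* Worked examples, added editorially (hypothetical, and assuming the
   mode has been enabled in mode_supports_prefixed_address_p).  For
   DImode, the function above would classify:

     reg + 0x12345  -> prefixed: fits in 34 bits but not in 16 bits;
     reg + 6        -> prefixed: fits in 16 bits, but 6 & 3 != 0 breaks
		       the DS-form alignment required by ld/std;
     reg + 8        -> not prefixed: a plain DS-form offset works.  */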
21556 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21557 /* Emit an assembler directive to set symbol visibility for DECL to
21558 VISIBILITY_TYPE. */
21560 static void
21561 rs6000_assemble_visibility (tree decl, int vis)
21563 if (TARGET_XCOFF)
21564 return;
21566 /* Functions need to have their entry point symbol visibility set as
21567 well as their descriptor symbol visibility. */
21568 if (DEFAULT_ABI == ABI_AIX
21569 && DOT_SYMBOLS
21570 && TREE_CODE (decl) == FUNCTION_DECL)
21572 static const char * const visibility_types[] = {
21573 NULL, "protected", "hidden", "internal"
21576 const char *name, *type;
21578 name = ((* targetm.strip_name_encoding)
21579 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21580 type = visibility_types[vis];
21582 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21583 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21585 else
21586 default_assemble_visibility (decl, vis);
21588 #endif
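/* Illustration, added editorially.  For an AIX-ABI function `foo' with
   dot-symbols and __attribute__ ((visibility ("hidden"))), the hook
   above emits both directives:

	.hidden foo
	.hidden .foo

   covering the descriptor symbol and the code entry symbol.  */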
21590 enum rtx_code
21591 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21593 /* Reversal of FP compares takes care -- an ordered compare
21594 becomes an unordered compare and vice versa. */
21595 if (mode == CCFPmode
21596 && (!flag_finite_math_only
21597 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21598 || code == UNEQ || code == LTGT))
21599 return reverse_condition_maybe_unordered (code);
21600 else
21601 return reverse_condition (code);
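/* Example, added editorially.  When NaNs matter, reversing GE on a
   CCFPmode compare must give UNLT, not LT: !(a >= b) is also true when
   a and b are unordered, which is exactly what
   reverse_condition_maybe_unordered (GE) == UNLT expresses.  */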
21604 /* Generate a compare for CODE. Return a brand-new rtx that
21605 represents the result of the compare. */
21607 static rtx
21608 rs6000_generate_compare (rtx cmp, machine_mode mode)
21610 machine_mode comp_mode;
21611 rtx compare_result;
21612 enum rtx_code code = GET_CODE (cmp);
21613 rtx op0 = XEXP (cmp, 0);
21614 rtx op1 = XEXP (cmp, 1);
21616 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21617 comp_mode = CCmode;
21618 else if (FLOAT_MODE_P (mode))
21619 comp_mode = CCFPmode;
21620 else if (code == GTU || code == LTU
21621 || code == GEU || code == LEU)
21622 comp_mode = CCUNSmode;
21623 else if ((code == EQ || code == NE)
21624 && unsigned_reg_p (op0)
21625 && (unsigned_reg_p (op1)
21626 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21627 /* These are unsigned values; perhaps there will be a later
21628 ordering compare that can be shared with this one. */
21629 comp_mode = CCUNSmode;
21630 else
21631 comp_mode = CCmode;
21633 /* If we have an unsigned compare, make sure we don't have a signed value as
21634 an immediate. */
21635 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21636 && INTVAL (op1) < 0)
21638 op0 = copy_rtx_if_shared (op0);
21639 op1 = force_reg (GET_MODE (op0), op1);
21640 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21643 /* First, the compare. */
21644 compare_result = gen_reg_rtx (comp_mode);
21646 /* IEEE 128-bit support in VSX registers when we do not have hardware
21647 support. */
21648 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21650 rtx libfunc = NULL_RTX;
21651 bool check_nan = false;
21652 rtx dest;
21654 switch (code)
21656 case EQ:
21657 case NE:
21658 libfunc = optab_libfunc (eq_optab, mode);
21659 break;
21661 case GT:
21662 case GE:
21663 libfunc = optab_libfunc (ge_optab, mode);
21664 break;
21666 case LT:
21667 case LE:
21668 libfunc = optab_libfunc (le_optab, mode);
21669 break;
21671 case UNORDERED:
21672 case ORDERED:
21673 libfunc = optab_libfunc (unord_optab, mode);
21674 code = (code == UNORDERED) ? NE : EQ;
21675 break;
21677 case UNGE:
21678 case UNGT:
21679 check_nan = true;
21680 libfunc = optab_libfunc (ge_optab, mode);
21681 code = (code == UNGE) ? GE : GT;
21682 break;
21684 case UNLE:
21685 case UNLT:
21686 check_nan = true;
21687 libfunc = optab_libfunc (le_optab, mode);
21688 code = (code == UNLE) ? LE : LT;
21689 break;
21691 case UNEQ:
21692 case LTGT:
21693 check_nan = true;
21694 libfunc = optab_libfunc (eq_optab, mode);
21695 code = (code == UNEQ) ? EQ : NE;
21696 break;
21698 default:
21699 gcc_unreachable ();
21702 gcc_assert (libfunc);
21704 if (!check_nan)
21705 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21706 SImode, op0, mode, op1, mode);
21708 /* The library signals an exception for signalling NaNs, so we need to
21709 handle isgreater, etc. by first checking isordered. */
21710 else
21712 rtx ne_rtx, normal_dest, unord_dest;
21713 rtx unord_func = optab_libfunc (unord_optab, mode);
21714 rtx join_label = gen_label_rtx ();
21715 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21716 rtx unord_cmp = gen_reg_rtx (comp_mode);
21719 /* Test for either value being a NaN. */
21720 gcc_assert (unord_func);
21721 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21722 SImode, op0, mode, op1, mode);
21724 /* Set value (1) if either value is a NaN, and jump to the join
21725 label. */
21726 dest = gen_reg_rtx (SImode);
21727 emit_move_insn (dest, const1_rtx);
21728 emit_insn (gen_rtx_SET (unord_cmp,
21729 gen_rtx_COMPARE (comp_mode, unord_dest,
21730 const0_rtx)));
21732 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21733 emit_jump_insn (gen_rtx_SET (pc_rtx,
21734 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21735 join_ref,
21736 pc_rtx)));
21738 /* Do the normal comparison, knowing that the values are not
21739 NaNs. */
21740 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21741 SImode, op0, mode, op1, mode);
21743 emit_insn (gen_cstoresi4 (dest,
21744 gen_rtx_fmt_ee (code, SImode, normal_dest,
21745 const0_rtx),
21746 normal_dest, const0_rtx));
21748 /* Join NaN and non-NaN paths. Compare dest against 0. */
21749 emit_label (join_label);
21750 code = NE;
21753 emit_insn (gen_rtx_SET (compare_result,
21754 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21757 else
21759 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21760 CLOBBERs to match cmptf_internal2 pattern. */
21761 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21762 && FLOAT128_IBM_P (GET_MODE (op0))
21763 && TARGET_HARD_FLOAT)
21764 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21765 gen_rtvec (10,
21766 gen_rtx_SET (compare_result,
21767 gen_rtx_COMPARE (comp_mode, op0, op1)),
21768 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21769 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21770 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21771 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21772 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21773 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21774 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21775 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21776 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21777 else if (GET_CODE (op1) == UNSPEC
21778 && XINT (op1, 1) == UNSPEC_SP_TEST)
21780 rtx op1b = XVECEXP (op1, 0, 0);
21781 comp_mode = CCEQmode;
21782 compare_result = gen_reg_rtx (CCEQmode);
21783 if (TARGET_64BIT)
21784 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21785 else
21786 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21788 else
21789 emit_insn (gen_rtx_SET (compare_result,
21790 gen_rtx_COMPARE (comp_mode, op0, op1)));
21793 /* Some kinds of FP comparisons need an OR operation;
21794 under flag_finite_math_only we don't bother. */
21795 if (FLOAT_MODE_P (mode)
21796 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21797 && !flag_finite_math_only
21798 && (code == LE || code == GE
21799 || code == UNEQ || code == LTGT
21800 || code == UNGT || code == UNLT))
21802 enum rtx_code or1, or2;
21803 rtx or1_rtx, or2_rtx, compare2_rtx;
21804 rtx or_result = gen_reg_rtx (CCEQmode);
21806 switch (code)
21808 case LE: or1 = LT; or2 = EQ; break;
21809 case GE: or1 = GT; or2 = EQ; break;
21810 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21811 case LTGT: or1 = LT; or2 = GT; break;
21812 case UNGT: or1 = UNORDERED; or2 = GT; break;
21813 case UNLT: or1 = UNORDERED; or2 = LT; break;
21814 default: gcc_unreachable ();
21816 validate_condition_mode (or1, comp_mode);
21817 validate_condition_mode (or2, comp_mode);
21818 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21819 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21820 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21821 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21822 const_true_rtx);
21823 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21825 compare_result = or_result;
21826 code = EQ;
21829 validate_condition_mode (code, GET_MODE (compare_result));
21831 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
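/* Illustration, added editorially; libcall names assume the usual
   libgcc soft-float128 configuration.  Without TARGET_FLOAT128_HW, a
   KFmode `a > b' above becomes roughly

	__gekf2 (a, b) > 0

   while a trapping-safe `a UNGT b' first tests __unordkf2 (a, b) != 0
   and only runs the __gekf2 path when the operands are ordered.  */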
21835 /* Return the diagnostic message string if the binary operation OP is
21836 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21838 static const char *
21839 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21840 const_tree type1,
21841 const_tree type2)
21843 machine_mode mode1 = TYPE_MODE (type1);
21844 machine_mode mode2 = TYPE_MODE (type2);
21846 /* For complex modes, use the inner type. */
21847 if (COMPLEX_MODE_P (mode1))
21848 mode1 = GET_MODE_INNER (mode1);
21850 if (COMPLEX_MODE_P (mode2))
21851 mode2 = GET_MODE_INNER (mode2);
21853 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21854 double to intermix unless -mfloat128-convert. */
21855 if (mode1 == mode2)
21856 return NULL;
21858 if (!TARGET_FLOAT128_CVT)
21860 if ((mode1 == KFmode && mode2 == IFmode)
21861 || (mode1 == IFmode && mode2 == KFmode))
21862 return N_("__float128 and __ibm128 cannot be used in the same "
21863 "expression");
21865 if (TARGET_IEEEQUAD
21866 && ((mode1 == IFmode && mode2 == TFmode)
21867 || (mode1 == TFmode && mode2 == IFmode)))
21868 return N_("__ibm128 and long double cannot be used in the same "
21869 "expression");
21871 if (!TARGET_IEEEQUAD
21872 && ((mode1 == KFmode && mode2 == TFmode)
21873 || (mode1 == TFmode && mode2 == KFmode)))
21874 return N_("__float128 and long double cannot be used in the same "
21875 "expression");
21878 return NULL;
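/* Example, added editorially.  Without -mfloat128-convert, the check
   above rejects source such as:

	__float128 a;
	__ibm128 b;
	... a + b ...	/* __float128 and __ibm128 cannot be used in
			   the same expression */

   an explicit conversion, e.g. a + (__float128) b, is the accepted
   spelling.  */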
21882 /* Expand floating point conversion to/from __float128 and __ibm128. */
21884 void
21885 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21887 machine_mode dest_mode = GET_MODE (dest);
21888 machine_mode src_mode = GET_MODE (src);
21889 convert_optab cvt = unknown_optab;
21890 bool do_move = false;
21891 rtx libfunc = NULL_RTX;
21892 rtx dest2;
21893 typedef rtx (*rtx_2func_t) (rtx, rtx);
21894 rtx_2func_t hw_convert = (rtx_2func_t)0;
21895 size_t kf_or_tf;
21897 struct hw_conv_t {
21898 rtx_2func_t from_df;
21899 rtx_2func_t from_sf;
21900 rtx_2func_t from_si_sign;
21901 rtx_2func_t from_si_uns;
21902 rtx_2func_t from_di_sign;
21903 rtx_2func_t from_di_uns;
21904 rtx_2func_t to_df;
21905 rtx_2func_t to_sf;
21906 rtx_2func_t to_si_sign;
21907 rtx_2func_t to_si_uns;
21908 rtx_2func_t to_di_sign;
21909 rtx_2func_t to_di_uns;
21910 } hw_conversions[2] = {
21911 /* conversions to/from KFmode */
21913 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21914 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21915 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21916 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21917 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21918 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21919 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21920 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21921 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21922 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21923 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21924 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21927 /* conversions to/from TFmode */
21929 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21930 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21931 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21932 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21933 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21934 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21935 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21936 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21937 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21938 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21939 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21940 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21944 if (dest_mode == src_mode)
21945 gcc_unreachable ();
21947 /* Eliminate memory operations. */
21948 if (MEM_P (src))
21949 src = force_reg (src_mode, src);
21951 if (MEM_P (dest))
21953 rtx tmp = gen_reg_rtx (dest_mode);
21954 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21955 rs6000_emit_move (dest, tmp, dest_mode);
21956 return;
21959 /* Convert to IEEE 128-bit floating point. */
21960 if (FLOAT128_IEEE_P (dest_mode))
21962 if (dest_mode == KFmode)
21963 kf_or_tf = 0;
21964 else if (dest_mode == TFmode)
21965 kf_or_tf = 1;
21966 else
21967 gcc_unreachable ();
21969 switch (src_mode)
21971 case E_DFmode:
21972 cvt = sext_optab;
21973 hw_convert = hw_conversions[kf_or_tf].from_df;
21974 break;
21976 case E_SFmode:
21977 cvt = sext_optab;
21978 hw_convert = hw_conversions[kf_or_tf].from_sf;
21979 break;
21981 case E_KFmode:
21982 case E_IFmode:
21983 case E_TFmode:
21984 if (FLOAT128_IBM_P (src_mode))
21985 cvt = sext_optab;
21986 else
21987 do_move = true;
21988 break;
21990 case E_SImode:
21991 if (unsigned_p)
21993 cvt = ufloat_optab;
21994 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21996 else
21998 cvt = sfloat_optab;
21999 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22001 break;
22003 case E_DImode:
22004 if (unsigned_p)
22006 cvt = ufloat_optab;
22007 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22009 else
22011 cvt = sfloat_optab;
22012 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22014 break;
22016 default:
22017 gcc_unreachable ();
22021 /* Convert from IEEE 128-bit floating point. */
22022 else if (FLOAT128_IEEE_P (src_mode))
22024 if (src_mode == KFmode)
22025 kf_or_tf = 0;
22026 else if (src_mode == TFmode)
22027 kf_or_tf = 1;
22028 else
22029 gcc_unreachable ();
22031 switch (dest_mode)
22033 case E_DFmode:
22034 cvt = trunc_optab;
22035 hw_convert = hw_conversions[kf_or_tf].to_df;
22036 break;
22038 case E_SFmode:
22039 cvt = trunc_optab;
22040 hw_convert = hw_conversions[kf_or_tf].to_sf;
22041 break;
22043 case E_KFmode:
22044 case E_IFmode:
22045 case E_TFmode:
22046 if (FLOAT128_IBM_P (dest_mode))
22047 cvt = trunc_optab;
22048 else
22049 do_move = true;
22050 break;
22052 case E_SImode:
22053 if (unsigned_p)
22055 cvt = ufix_optab;
22056 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22058 else
22060 cvt = sfix_optab;
22061 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22063 break;
22065 case E_DImode:
22066 if (unsigned_p)
22068 cvt = ufix_optab;
22069 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22071 else
22073 cvt = sfix_optab;
22074 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22076 break;
22078 default:
22079 gcc_unreachable ();
22083 /* Both IBM format. */
22084 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22085 do_move = true;
22087 else
22088 gcc_unreachable ();
22090 /* Handle conversion between TFmode/KFmode/IFmode. */
22091 if (do_move)
22092 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22094 /* Handle conversion if we have hardware support. */
22095 else if (TARGET_FLOAT128_HW && hw_convert)
22096 emit_insn ((hw_convert) (dest, src));
22098 /* Call an external function to do the conversion. */
22099 else if (cvt != unknown_optab)
22101 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22102 gcc_assert (libfunc != NULL_RTX);
22104 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22105 src, src_mode);
22107 gcc_assert (dest2 != NULL_RTX);
22108 if (!rtx_equal_p (dest, dest2))
22109 emit_move_insn (dest, dest2);
22112 else
22113 gcc_unreachable ();
22115 return;
22119 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22120 can be used as that dest register. Return the dest register. */
22122 rtx
22123 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22125 if (op2 == const0_rtx)
22126 return op1;
22128 if (GET_CODE (scratch) == SCRATCH)
22129 scratch = gen_reg_rtx (mode);
22131 if (logical_operand (op2, mode))
22132 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22133 else
22134 emit_insn (gen_rtx_SET (scratch,
22135 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22137 return scratch;
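/* Example, added editorially (values hypothetical).  With OP2 == 0x5555,
   a logical_operand, the helper above emits `xori scratch,op1,0x5555';
   with OP2 == 100000, which is not, it adds the negated constant
   instead.  Either way SCRATCH becomes zero exactly when OP1 == OP2,
   ready for a compare against zero.  */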
22140 void
22141 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22143 rtx condition_rtx;
22144 machine_mode op_mode;
22145 enum rtx_code cond_code;
22146 rtx result = operands[0];
22148 condition_rtx = rs6000_generate_compare (operands[1], mode);
22149 cond_code = GET_CODE (condition_rtx);
22151 if (cond_code == NE
22152 || cond_code == GE || cond_code == LE
22153 || cond_code == GEU || cond_code == LEU
22154 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22156 rtx not_result = gen_reg_rtx (CCEQmode);
22157 rtx not_op, rev_cond_rtx;
22158 machine_mode cc_mode;
22160 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22162 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22163 SImode, XEXP (condition_rtx, 0), const0_rtx);
22164 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22165 emit_insn (gen_rtx_SET (not_result, not_op));
22166 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22169 op_mode = GET_MODE (XEXP (operands[1], 0));
22170 if (op_mode == VOIDmode)
22171 op_mode = GET_MODE (XEXP (operands[1], 1));
22173 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22175 PUT_MODE (condition_rtx, DImode);
22176 convert_move (result, condition_rtx, 0);
22178 else
22180 PUT_MODE (condition_rtx, SImode);
22181 emit_insn (gen_rtx_SET (result, condition_rtx));
22185 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison in OPERANDS[0]. */
22187 void
22188 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22190 rtx condition_rtx, loc_ref;
22192 condition_rtx = rs6000_generate_compare (operands[0], mode);
22193 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22194 emit_jump_insn (gen_rtx_SET (pc_rtx,
22195 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22196 loc_ref, pc_rtx)));
22199 /* Return the string to output a conditional branch to LABEL, which is
22200 the operand template of the label, or NULL if the branch is really a
22201 conditional return.
22203 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22204 condition code register and its mode specifies what kind of
22205 comparison we made.
22207 REVERSED is nonzero if we should reverse the sense of the comparison.
22209 INSN is the insn. */
22211 char *
22212 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22214 static char string[64];
22215 enum rtx_code code = GET_CODE (op);
22216 rtx cc_reg = XEXP (op, 0);
22217 machine_mode mode = GET_MODE (cc_reg);
22218 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22219 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22220 int really_reversed = reversed ^ need_longbranch;
22221 char *s = string;
22222 const char *ccode;
22223 const char *pred;
22224 rtx note;
22226 validate_condition_mode (code, mode);
22228 /* Work out which way this really branches. We could always use
22229 reverse_condition_maybe_unordered here, but using reverse_condition
22230 when possible makes the resulting assembler clearer. */
22231 if (really_reversed)
22233 /* Reversal of FP compares takes care -- an ordered compare
22234 becomes an unordered compare and vice versa. */
22235 if (mode == CCFPmode)
22236 code = reverse_condition_maybe_unordered (code);
22237 else
22238 code = reverse_condition (code);
22241 switch (code)
22243 /* Not all of these are actually distinct opcodes, but
22244 we distinguish them for clarity of the resulting assembler. */
22245 case NE: case LTGT:
22246 ccode = "ne"; break;
22247 case EQ: case UNEQ:
22248 ccode = "eq"; break;
22249 case GE: case GEU:
22250 ccode = "ge"; break;
22251 case GT: case GTU: case UNGT:
22252 ccode = "gt"; break;
22253 case LE: case LEU:
22254 ccode = "le"; break;
22255 case LT: case LTU: case UNLT:
22256 ccode = "lt"; break;
22257 case UNORDERED: ccode = "un"; break;
22258 case ORDERED: ccode = "nu"; break;
22259 case UNGE: ccode = "nl"; break;
22260 case UNLE: ccode = "ng"; break;
22261 default:
22262 gcc_unreachable ();
22265 /* Maybe we have a guess as to how likely the branch is. */
22266 pred = "";
22267 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22268 if (note != NULL_RTX)
22270 /* PROB is the difference from 50%. */
22271 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22272 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22274 /* Only hint for highly probable/improbable branches on newer cpus when
22275 we have real profile data, as static prediction overrides processor
22276 dynamic prediction. For older cpus we may as well always hint, but
22277 assume not taken for branches that are very close to 50% as a
22278 mispredicted taken branch is more expensive than a
22279 mispredicted not-taken branch. */
22280 if (rs6000_always_hint
22281 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22282 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22283 && br_prob_note_reliable_p (note)))
22285 if (abs (prob) > REG_BR_PROB_BASE / 20
22286 && ((prob > 0) ^ need_longbranch))
22287 pred = "+";
22288 else
22289 pred = "-";
22293 if (label == NULL)
22294 s += sprintf (s, "b%slr%s ", ccode, pred);
22295 else
22296 s += sprintf (s, "b%s%s ", ccode, pred);
22298 /* We need to escape any '%' characters in the reg_names string.
22299 Assume they'd only be the first character.... */
22300 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22301 *s++ = '%';
22302 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22304 if (label != NULL)
22306 /* If the branch distance was too far, we may have to use an
22307 unconditional branch to go the distance. */
22308 if (need_longbranch)
22309 s += sprintf (s, ",$+8\n\tb %s", label);
22310 else
22311 s += sprintf (s, ",%s", label);
22314 return string;
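/* Illustration, added editorially (label and CR field hypothetical).
   For an EQ test on cr7 this routine might return "beq 7,.L5", with a
   "+" or "-" hint appended when a reliable probability note exists.
   If the target is out of conditional-branch range (length 8), the
   test is reversed and the long form "bne 7,$+8\n\tb .L5" is used.  */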
22317 /* Return insn for VSX or Altivec comparisons. */
22319 static rtx
22320 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22322 rtx mask;
22323 machine_mode mode = GET_MODE (op0);
22325 switch (code)
22327 default:
22328 break;
22330 case GE:
22331 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22332 return NULL_RTX;
22333 /* FALLTHRU */
22335 case EQ:
22336 case GT:
22337 case GTU:
22338 case ORDERED:
22339 case UNORDERED:
22340 case UNEQ:
22341 case LTGT:
22342 mask = gen_reg_rtx (mode);
22343 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22344 return mask;
22347 return NULL_RTX;
22350 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22351 DMODE is expected destination mode. This is a recursive function. */
22353 static rtx
22354 rs6000_emit_vector_compare (enum rtx_code rcode,
22355 rtx op0, rtx op1,
22356 machine_mode dmode)
22358 rtx mask;
22359 bool swap_operands = false;
22360 bool try_again = false;
22362 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22363 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22365 /* See if the comparison works as is. */
22366 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22367 if (mask)
22368 return mask;
22370 switch (rcode)
22372 case LT:
22373 rcode = GT;
22374 swap_operands = true;
22375 try_again = true;
22376 break;
22377 case LTU:
22378 rcode = GTU;
22379 swap_operands = true;
22380 try_again = true;
22381 break;
22382 case NE:
22383 case UNLE:
22384 case UNLT:
22385 case UNGE:
22386 case UNGT:
22387 /* Invert condition and try again.
22388 e.g., A != B becomes ~(A==B). */
22390 enum rtx_code rev_code;
22391 enum insn_code nor_code;
22392 rtx mask2;
22394 rev_code = reverse_condition_maybe_unordered (rcode);
22395 if (rev_code == UNKNOWN)
22396 return NULL_RTX;
22398 nor_code = optab_handler (one_cmpl_optab, dmode);
22399 if (nor_code == CODE_FOR_nothing)
22400 return NULL_RTX;
22402 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22403 if (!mask2)
22404 return NULL_RTX;
22406 mask = gen_reg_rtx (dmode);
22407 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22408 return mask;
22410 break;
22411 case GE:
22412 case GEU:
22413 case LE:
22414 case LEU:
22415 /* Try GT/GTU/LT/LTU OR EQ */
22417 rtx c_rtx, eq_rtx;
22418 enum insn_code ior_code;
22419 enum rtx_code new_code;
22421 switch (rcode)
22423 case GE:
22424 new_code = GT;
22425 break;
22427 case GEU:
22428 new_code = GTU;
22429 break;
22431 case LE:
22432 new_code = LT;
22433 break;
22435 case LEU:
22436 new_code = LTU;
22437 break;
22439 default:
22440 gcc_unreachable ();
22443 ior_code = optab_handler (ior_optab, dmode);
22444 if (ior_code == CODE_FOR_nothing)
22445 return NULL_RTX;
22447 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22448 if (!c_rtx)
22449 return NULL_RTX;
22451 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22452 if (!eq_rtx)
22453 return NULL_RTX;
22455 mask = gen_reg_rtx (dmode);
22456 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22457 return mask;
22459 break;
22460 default:
22461 return NULL_RTX;
22464 if (try_again)
22466 if (swap_operands)
22467 std::swap (op0, op1);
22469 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22470 if (mask)
22471 return mask;
22474 /* You only get two chances. */
22475 return NULL_RTX;
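/* Illustration, added editorially.  Altivec/VSX only provide EQ- and
   GT-style vector compares, so the recursion above synthesizes the
   rest.  For V4SImode:

     a <  b  ->  vcmpgtsw with the operands swapped;
     a <= b  ->  (a < b) OR (a == b), i.e. vcmpgtsw + vcmpequw + vor;
     a != b  ->  the one's complement of vcmpequw.  */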
22478 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22479 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22480 operands for the relation operation COND. */
22482 int
22483 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22484 rtx cond, rtx cc_op0, rtx cc_op1)
22486 machine_mode dest_mode = GET_MODE (dest);
22487 machine_mode mask_mode = GET_MODE (cc_op0);
22488 enum rtx_code rcode = GET_CODE (cond);
22489 machine_mode cc_mode = CCmode;
22490 rtx mask;
22491 rtx cond2;
22492 bool invert_move = false;
22494 if (VECTOR_UNIT_NONE_P (dest_mode))
22495 return 0;
22497 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22498 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22500 switch (rcode)
22502 /* Swap operands if we can, and fall back to doing the operation as
22503 specified, and doing a NOR to invert the test. */
22504 case NE:
22505 case UNLE:
22506 case UNLT:
22507 case UNGE:
22508 case UNGT:
22509 /* Invert condition and try again.
22510 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22511 invert_move = true;
22512 rcode = reverse_condition_maybe_unordered (rcode);
22513 if (rcode == UNKNOWN)
22514 return 0;
22515 break;
22517 case GE:
22518 case LE:
22519 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22521 /* Invert condition to avoid compound test. */
22522 invert_move = true;
22523 rcode = reverse_condition (rcode);
22525 break;
22527 case GTU:
22528 case GEU:
22529 case LTU:
22530 case LEU:
22531 /* Mark unsigned tests with CCUNSmode. */
22532 cc_mode = CCUNSmode;
22534 /* Invert condition to avoid compound test if necessary. */
22535 if (rcode == GEU || rcode == LEU)
22537 invert_move = true;
22538 rcode = reverse_condition (rcode);
22540 break;
22542 default:
22543 break;
22546 /* Get the vector mask for the given relational operations. */
22547 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22549 if (!mask)
22550 return 0;
22552 if (invert_move)
22553 std::swap (op_true, op_false);
22555 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22556 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22557 && (GET_CODE (op_true) == CONST_VECTOR
22558 || GET_CODE (op_false) == CONST_VECTOR))
22560 rtx constant_0 = CONST0_RTX (dest_mode);
22561 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22563 if (op_true == constant_m1 && op_false == constant_0)
22565 emit_move_insn (dest, mask);
22566 return 1;
22569 else if (op_true == constant_0 && op_false == constant_m1)
22571 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22572 return 1;
22575 /* If we can't use the vector comparison directly, perhaps we can use
22576 the mask for the true or false fields, instead of loading up a
22577 constant. */
22578 if (op_true == constant_m1)
22579 op_true = mask;
22581 if (op_false == constant_0)
22582 op_false = mask;
22585 if (!REG_P (op_true) && !SUBREG_P (op_true))
22586 op_true = force_reg (dest_mode, op_true);
22588 if (!REG_P (op_false) && !SUBREG_P (op_false))
22589 op_false = force_reg (dest_mode, op_false);
22591 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22592 CONST0_RTX (dest_mode));
22593 emit_insn (gen_rtx_SET (dest,
22594 gen_rtx_IF_THEN_ELSE (dest_mode,
22595 cond2,
22596 op_true,
22597 op_false)));
22598 return 1;
22601 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22602 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22603 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22604 0 if the hardware has no such operation. */
22606 static int
22607 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22609 enum rtx_code code = GET_CODE (op);
22610 rtx op0 = XEXP (op, 0);
22611 rtx op1 = XEXP (op, 1);
22612 machine_mode compare_mode = GET_MODE (op0);
22613 machine_mode result_mode = GET_MODE (dest);
22614 bool max_p = false;
22616 if (result_mode != compare_mode)
22617 return 0;
22619 if (code == GE || code == GT)
22620 max_p = true;
22621 else if (code == LE || code == LT)
22622 max_p = false;
22623 else
22624 return 0;
22626 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22627 ;
22629 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22630 max_p = !max_p;
22632 else
22633 return 0;
22635 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22636 return 1;
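/* Example, added editorially.  With -mcpu=power9, a DFmode select
   matching (a >= b ? a : b) is emitted above as a single
   `xsmaxcdp dest,a,b', and (a >= b ? b : a) flips MAX_P and becomes
   `xsmincdp'.  */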
22639 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22640 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22641 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22642 zero/false. Return 0 if the hardware has no such operation. */
22644 static int
22645 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22647 enum rtx_code code = GET_CODE (op);
22648 rtx op0 = XEXP (op, 0);
22649 rtx op1 = XEXP (op, 1);
22650 machine_mode result_mode = GET_MODE (dest);
22651 rtx compare_rtx;
22652 rtx cmove_rtx;
22653 rtx clobber_rtx;
22655 if (!can_create_pseudo_p ())
22656 return 0;
22658 switch (code)
22660 case EQ:
22661 case GE:
22662 case GT:
22663 break;
22665 case NE:
22666 case LT:
22667 case LE:
22668 code = swap_condition (code);
22669 std::swap (op0, op1);
22670 break;
22672 default:
22673 return 0;
22676 /* Generate: [(parallel [(set (dest)
22677 (if_then_else (op (cmp1) (cmp2))
22678 (true)
22679 (false)))
22680 (clobber (scratch))])]. */
22682 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22683 cmove_rtx = gen_rtx_SET (dest,
22684 gen_rtx_IF_THEN_ELSE (result_mode,
22685 compare_rtx,
22686 true_cond,
22687 false_cond));
22689 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22690 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22691 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22693 return 1;
22696 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22697 operands of the last comparison is nonzero/true, FALSE_COND if it
22698 is zero/false. Return 0 if the hardware has no such operation. */
22700 int
22701 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22703 enum rtx_code code = GET_CODE (op);
22704 rtx op0 = XEXP (op, 0);
22705 rtx op1 = XEXP (op, 1);
22706 machine_mode compare_mode = GET_MODE (op0);
22707 machine_mode result_mode = GET_MODE (dest);
22708 rtx temp;
22709 bool is_against_zero;
22711 /* These modes should always match. */
22712 if (GET_MODE (op1) != compare_mode
22713 /* In the isel case however, we can use a compare immediate, so
22714 op1 may be a small constant. */
22715 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22716 return 0;
22717 if (GET_MODE (true_cond) != result_mode)
22718 return 0;
22719 if (GET_MODE (false_cond) != result_mode)
22720 return 0;
22722 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22723 if (TARGET_P9_MINMAX
22724 && (compare_mode == SFmode || compare_mode == DFmode)
22725 && (result_mode == SFmode || result_mode == DFmode))
22727 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22728 return 1;
22730 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22731 return 1;
22734 /* Don't allow using floating point comparisons for integer results for
22735 now. */
22736 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22737 return 0;
22739 /* First, work out if the hardware can do this at all, or
22740 if it's too slow.... */
22741 if (!FLOAT_MODE_P (compare_mode))
22743 if (TARGET_ISEL)
22744 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22745 return 0;
22748 is_against_zero = op1 == CONST0_RTX (compare_mode);
22750 /* A floating-point subtract might overflow, underflow, or produce
22751 an inexact result, thus changing the floating-point flags, so it
22752 can't be generated if we care about that. It's safe if one side
22753 of the construct is zero, since then no subtract will be
22754 generated. */
22755 if (SCALAR_FLOAT_MODE_P (compare_mode)
22756 && flag_trapping_math && ! is_against_zero)
22757 return 0;
22759 /* Eliminate half of the comparisons by switching operands, this
22760 makes the remaining code simpler. */
22761 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22762 || code == LTGT || code == LT || code == UNLE)
22764 code = reverse_condition_maybe_unordered (code);
22765 temp = true_cond;
22766 true_cond = false_cond;
22767 false_cond = temp;
22770 /* UNEQ and LTGT take four instructions for a comparison with zero,
22771 it'll probably be faster to use a branch here too. */
22772 if (code == UNEQ && HONOR_NANS (compare_mode))
22773 return 0;
22775 /* We're going to try to implement comparisons by performing
22776 a subtract, then comparing against zero. Unfortunately,
22777 Inf - Inf is NaN which is not zero, and so if we don't
22778 know that the operand is finite and the comparison
22779 would treat EQ different to UNORDERED, we can't do it. */
22780 if (HONOR_INFINITIES (compare_mode)
22781 && code != GT && code != UNGE
22782 && (!CONST_DOUBLE_P (op1)
22783 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22784 /* Constructs of the form (a OP b ? a : b) are safe. */
22785 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22786 || (! rtx_equal_p (op0, true_cond)
22787 && ! rtx_equal_p (op1, true_cond))))
22788 return 0;
22790 /* At this point we know we can use fsel. */
22792 /* Reduce the comparison to a comparison against zero. */
22793 if (! is_against_zero)
22795 temp = gen_reg_rtx (compare_mode);
22796 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22797 op0 = temp;
22798 op1 = CONST0_RTX (compare_mode);
22801 /* If we don't care about NaNs we can reduce some of the comparisons
22802 down to faster ones. */
22803 if (! HONOR_NANS (compare_mode))
22804 switch (code)
22806 case GT:
22807 code = LE;
22808 temp = true_cond;
22809 true_cond = false_cond;
22810 false_cond = temp;
22811 break;
22812 case UNGE:
22813 code = GE;
22814 break;
22815 case UNEQ:
22816 code = EQ;
22817 break;
22818 default:
22819 break;
22822 /* Now, reduce everything down to a GE. */
22823 switch (code)
22825 case GE:
22826 break;
22828 case LE:
22829 temp = gen_reg_rtx (compare_mode);
22830 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22831 op0 = temp;
22832 break;
22834 case ORDERED:
22835 temp = gen_reg_rtx (compare_mode);
22836 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22837 op0 = temp;
22838 break;
22840 case EQ:
22841 temp = gen_reg_rtx (compare_mode);
22842 emit_insn (gen_rtx_SET (temp,
22843 gen_rtx_NEG (compare_mode,
22844 gen_rtx_ABS (compare_mode, op0))));
22845 op0 = temp;
22846 break;
22848 case UNGE:
22849 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22850 temp = gen_reg_rtx (result_mode);
22851 emit_insn (gen_rtx_SET (temp,
22852 gen_rtx_IF_THEN_ELSE (result_mode,
22853 gen_rtx_GE (VOIDmode,
22854 op0, op1),
22855 true_cond, false_cond)));
22856 false_cond = true_cond;
22857 true_cond = temp;
22859 temp = gen_reg_rtx (compare_mode);
22860 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22861 op0 = temp;
22862 break;
22864 case GT:
22865 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22866 temp = gen_reg_rtx (result_mode);
22867 emit_insn (gen_rtx_SET (temp,
22868 gen_rtx_IF_THEN_ELSE (result_mode,
22869 gen_rtx_GE (VOIDmode,
22870 op0, op1),
22871 true_cond, false_cond)));
22872 true_cond = false_cond;
22873 false_cond = temp;
22875 temp = gen_reg_rtx (compare_mode);
22876 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22877 op0 = temp;
22878 break;
22880 default:
22881 gcc_unreachable ();
22884 emit_insn (gen_rtx_SET (dest,
22885 gen_rtx_IF_THEN_ELSE (result_mode,
22886 gen_rtx_GE (VOIDmode,
22887 op0, op1),
22888 true_cond, false_cond)));
22889 return 1;
22892 /* Same as above, but for ints (isel). */
22894 int
22895 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22897 rtx condition_rtx, cr;
22898 machine_mode mode = GET_MODE (dest);
22899 enum rtx_code cond_code;
22900 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22901 bool signedp;
22903 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22904 return 0;
22906 /* We still have to do the compare, because isel doesn't do a
22907 compare, it just looks at the CRx bits set by a previous compare
22908 instruction. */
22909 condition_rtx = rs6000_generate_compare (op, mode);
22910 cond_code = GET_CODE (condition_rtx);
22911 cr = XEXP (condition_rtx, 0);
22912 signedp = GET_MODE (cr) == CCmode;
22914 isel_func = (mode == SImode
22915 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22916 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22918 switch (cond_code)
22920 case LT: case GT: case LTU: case GTU: case EQ:
22921 /* isel handles these directly. */
22922 break;
22924 default:
22925 /* We need to swap the sense of the comparison. */
22927 std::swap (false_cond, true_cond);
22928 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22930 break;
22933 false_cond = force_reg (mode, false_cond);
22934 if (true_cond != const0_rtx)
22935 true_cond = force_reg (mode, true_cond);
22937 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22939 return 1;
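/* Illustration, added editorially (register numbers hypothetical).
   On an isel-capable target, `dest = (a < b) ? x : y' with signed
   SImode operands comes out roughly as:

	cmpw 0,a,b
	isel dest,x,y,0		-- CR bit 0 is LT of cr0

   with y forced into a register first, since only the true arm may
   stay const0_rtx.  */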
22942 void
22943 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22945 machine_mode mode = GET_MODE (op0);
22946 enum rtx_code c;
22947 rtx target;
22949 /* VSX/altivec have direct min/max insns. */
22950 if ((code == SMAX || code == SMIN)
22951 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22952 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22954 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22955 return;
22958 if (code == SMAX || code == SMIN)
22959 c = GE;
22960 else
22961 c = GEU;
22963 if (code == SMAX || code == UMAX)
22964 target = emit_conditional_move (dest, c, op0, op1, mode,
22965 op0, op1, mode, 0);
22966 else
22967 target = emit_conditional_move (dest, c, op0, op1, mode,
22968 op1, op0, mode, 0);
22969 gcc_assert (target);
22970 if (target != dest)
22971 emit_move_insn (dest, target);
22974 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22975 COND is true. Mark the jump as unlikely to be taken. */
22977 static void
22978 emit_unlikely_jump (rtx cond, rtx label)
22980 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22981 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22982 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22985 /* A subroutine of the atomic operation splitters. Emit a load-locked
22986 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22987 the zero_extend operation. */
22989 static void
22990 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22992 rtx (*fn) (rtx, rtx) = NULL;
22994 switch (mode)
22996 case E_QImode:
22997 fn = gen_load_lockedqi;
22998 break;
22999 case E_HImode:
23000 fn = gen_load_lockedhi;
23001 break;
23002 case E_SImode:
23003 if (GET_MODE (mem) == QImode)
23004 fn = gen_load_lockedqi_si;
23005 else if (GET_MODE (mem) == HImode)
23006 fn = gen_load_lockedhi_si;
23007 else
23008 fn = gen_load_lockedsi;
23009 break;
23010 case E_DImode:
23011 fn = gen_load_lockeddi;
23012 break;
23013 case E_TImode:
23014 fn = gen_load_lockedti;
23015 break;
23016 default:
23017 gcc_unreachable ();
23019 emit_insn (fn (reg, mem));
23022 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23023 instruction in MODE. */
23025 static void
23026 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23028 rtx (*fn) (rtx, rtx, rtx) = NULL;
23030 switch (mode)
23032 case E_QImode:
23033 fn = gen_store_conditionalqi;
23034 break;
23035 case E_HImode:
23036 fn = gen_store_conditionalhi;
23037 break;
23038 case E_SImode:
23039 fn = gen_store_conditionalsi;
23040 break;
23041 case E_DImode:
23042 fn = gen_store_conditionaldi;
23043 break;
23044 case E_TImode:
23045 fn = gen_store_conditionalti;
23046 break;
23047 default:
23048 gcc_unreachable ();
23051 /* Emit sync before stwcx. to address PPC405 Erratum. */
23052 if (PPC405_ERRATUM77)
23053 emit_insn (gen_hwsync ());
23055 emit_insn (fn (res, mem, val));
23058 /* Expand barriers before and after a load_locked/store_cond sequence. */
23060 static rtx
23061 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23063 rtx addr = XEXP (mem, 0);
23065 if (!legitimate_indirect_address_p (addr, reload_completed)
23066 && !legitimate_indexed_address_p (addr, reload_completed))
23068 addr = force_reg (Pmode, addr);
23069 mem = replace_equiv_address_nv (mem, addr);
23072 switch (model)
23074 case MEMMODEL_RELAXED:
23075 case MEMMODEL_CONSUME:
23076 case MEMMODEL_ACQUIRE:
23077 break;
23078 case MEMMODEL_RELEASE:
23079 case MEMMODEL_ACQ_REL:
23080 emit_insn (gen_lwsync ());
23081 break;
23082 case MEMMODEL_SEQ_CST:
23083 emit_insn (gen_hwsync ());
23084 break;
23085 default:
23086 gcc_unreachable ();
23088 return mem;
23091 static void
23092 rs6000_post_atomic_barrier (enum memmodel model)
23094 switch (model)
23096 case MEMMODEL_RELAXED:
23097 case MEMMODEL_CONSUME:
23098 case MEMMODEL_RELEASE:
23099 break;
23100 case MEMMODEL_ACQUIRE:
23101 case MEMMODEL_ACQ_REL:
23102 case MEMMODEL_SEQ_CST:
23103 emit_insn (gen_isync ());
23104 break;
23105 default:
23106 gcc_unreachable ();
23110 /* A subroutine of the various atomic expanders. For sub-word operations,
23111 we must adjust things to operate on SImode. Given the original MEM,
23112 return a new aligned memory. Also build and return the quantities by
23113 which to shift and mask. */
23115 static rtx
23116 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23118 rtx addr, align, shift, mask, mem;
23119 HOST_WIDE_INT shift_mask;
23120 machine_mode mode = GET_MODE (orig_mem);
23122 /* For smaller modes, we have to implement this via SImode. */
23123 shift_mask = (mode == QImode ? 0x18 : 0x10);
23125 addr = XEXP (orig_mem, 0);
23126 addr = force_reg (GET_MODE (addr), addr);
23128 /* Aligned memory containing subword. Generate a new memory. We
23129 do not want any of the existing MEM_ATTR data, as we're now
23130 accessing memory outside the original object. */
23131 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23132 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23133 mem = gen_rtx_MEM (SImode, align);
23134 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23135 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23136 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23138 /* Shift amount for subword relative to aligned word. */
23139 shift = gen_reg_rtx (SImode);
23140 addr = gen_lowpart (SImode, addr);
23141 rtx tmp = gen_reg_rtx (SImode);
23142 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23143 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23144 if (BYTES_BIG_ENDIAN)
23145 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23146 shift, 1, OPTAB_LIB_WIDEN);
23147 *pshift = shift;
23149 /* Mask for insertion. */
23150 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23151 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23152 *pmask = mask;
23154 return mem;
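/* Worked example, added editorially.  For a QImode access at address A,
   the helper above produces:

     mem   = the SImode word at (A & -4)
     shift = (A << 3) & 0x18	-- the byte's bit offset, XORed with
				   0x18 on big-endian targets
     mask  = 0xff << shift

   so callers can operate on the containing word and use SHIFT/MASK to
   extract or insert the subword.  */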
23157 /* A subroutine of the various atomic expanders. For sub-word operands,
23158 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23160 static rtx
23161 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23163 rtx x;
23165 x = gen_reg_rtx (SImode);
23166 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23167 gen_rtx_NOT (SImode, mask),
23168 oldval)));
23170 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23172 return x;
23175 /* A subroutine of the various atomic expanders. For sub-word operands,
23176 extract WIDE to NARROW via SHIFT. */
23178 static void
23179 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23181 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23182 wide, 1, OPTAB_LIB_WIDEN);
23183 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23186 /* Expand an atomic compare and swap operation. */
23188 void
23189 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23191 rtx boolval, retval, mem, oldval, newval, cond;
23192 rtx label1, label2, x, mask, shift;
23193 machine_mode mode, orig_mode;
23194 enum memmodel mod_s, mod_f;
23195 bool is_weak;
23197 boolval = operands[0];
23198 retval = operands[1];
23199 mem = operands[2];
23200 oldval = operands[3];
23201 newval = operands[4];
23202 is_weak = (INTVAL (operands[5]) != 0);
23203 mod_s = memmodel_base (INTVAL (operands[6]));
23204 mod_f = memmodel_base (INTVAL (operands[7]));
23205 orig_mode = mode = GET_MODE (mem);
23207 mask = shift = NULL_RTX;
23208 if (mode == QImode || mode == HImode)
23210 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23211 lwarx and use shift/mask operations. With power8, we need to do the
23212 comparison in SImode, but the store is still done in QI/HImode. */
23213 oldval = convert_modes (SImode, mode, oldval, 1);
23215 if (!TARGET_SYNC_HI_QI)
23217 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23219 /* Shift and mask OLDVAL into position within the word. */
23220 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23221 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23223 /* Shift and mask NEWVAL into position within the word. */
23224 newval = convert_modes (SImode, mode, newval, 1);
23225 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23226 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23229 /* Prepare to adjust the return value. */
23230 retval = gen_reg_rtx (SImode);
23231 mode = SImode;
23233 else if (reg_overlap_mentioned_p (retval, oldval))
23234 oldval = copy_to_reg (oldval);
23236 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23237 oldval = copy_to_mode_reg (mode, oldval);
23239 if (reg_overlap_mentioned_p (retval, newval))
23240 newval = copy_to_reg (newval);
23242 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23244 label1 = NULL_RTX;
23245 if (!is_weak)
23247 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23248 emit_label (XEXP (label1, 0));
23250 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23252 emit_load_locked (mode, retval, mem);
23254 x = retval;
23255 if (mask)
23256 x = expand_simple_binop (SImode, AND, retval, mask,
23257 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23259 cond = gen_reg_rtx (CCmode);
23260 /* If we have TImode, synthesize a comparison. */
23261 if (mode != TImode)
23262 x = gen_rtx_COMPARE (CCmode, x, oldval);
23263 else
23265 rtx xor1_result = gen_reg_rtx (DImode);
23266 rtx xor2_result = gen_reg_rtx (DImode);
23267 rtx or_result = gen_reg_rtx (DImode);
23268 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23269 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23270 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23271 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23273 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23274 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23275 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23276 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23279 emit_insn (gen_rtx_SET (cond, x));
23281 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23282 emit_unlikely_jump (x, label2);
23284 x = newval;
23285 if (mask)
23286 x = rs6000_mask_atomic_subword (retval, newval, mask);
23288 emit_store_conditional (orig_mode, cond, mem, x);
23290 if (!is_weak)
23292 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23293 emit_unlikely_jump (x, label1);
23296 if (!is_mm_relaxed (mod_f))
23297 emit_label (XEXP (label2, 0));
23299 rs6000_post_atomic_barrier (mod_s);
23301 if (is_mm_relaxed (mod_f))
23302 emit_label (XEXP (label2, 0));
23304 if (shift)
23305 rs6000_finish_atomic_subword (operands[1], retval, shift);
23306 else if (mode != GET_MODE (operands[1]))
23307 convert_move (operands[1], retval, 1);
23309 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23310 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23311 emit_insn (gen_rtx_SET (boolval, x));
23314 /* Expand an atomic exchange operation. */
23316 void
23317 rs6000_expand_atomic_exchange (rtx operands[])
23319 rtx retval, mem, val, cond;
23320 machine_mode mode;
23321 enum memmodel model;
23322 rtx label, x, mask, shift;
23324 retval = operands[0];
23325 mem = operands[1];
23326 val = operands[2];
23327 model = memmodel_base (INTVAL (operands[3]));
23328 mode = GET_MODE (mem);
23330 mask = shift = NULL_RTX;
23331 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23333 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23335 /* Shift and mask VAL into position within the word. */
23336 val = convert_modes (SImode, mode, val, 1);
23337 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23338 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23340 /* Prepare to adjust the return value. */
23341 retval = gen_reg_rtx (SImode);
23342 mode = SImode;
23345 mem = rs6000_pre_atomic_barrier (mem, model);
23347 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23348 emit_label (XEXP (label, 0));
23350 emit_load_locked (mode, retval, mem);
23352 x = val;
23353 if (mask)
23354 x = rs6000_mask_atomic_subword (retval, val, mask);
23356 cond = gen_reg_rtx (CCmode);
23357 emit_store_conditional (mode, cond, mem, x);
23359 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23360 emit_unlikely_jump (x, label);
23362 rs6000_post_atomic_barrier (model);
23364 if (shift)
23365 rs6000_finish_atomic_subword (operands[0], retval, shift);
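/* Illustration, added editorially (register numbers hypothetical).
   A seq_cst SImode exchange expands through the code above into the
   classic reservation loop:

	sync
   .L1:	lwarx 9,0,3		-- load-locked old value
	stwcx. 4,0,3		-- store-conditional new value
	bne- 0,.L1		-- reservation lost: retry
	isync

   QI/HImode on pre-power8 targets runs the same loop on the containing
   word via the subword shift/mask helpers.  */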
23368 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23369 to perform. MEM is the memory on which to operate. VAL is the second
23370 operand of the binary operator. BEFORE and AFTER are optional locations to
23371 return the value of MEM either before or after the operation. MODEL_RTX
23372 is a CONST_INT containing the memory model to use. */
23374 void
23375 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23376 rtx orig_before, rtx orig_after, rtx model_rtx)
23378 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23379 machine_mode mode = GET_MODE (mem);
23380 machine_mode store_mode = mode;
23381 rtx label, x, cond, mask, shift;
23382 rtx before = orig_before, after = orig_after;
23384 mask = shift = NULL_RTX;
23385 /* On power8, we want to use SImode for the operation. On previous
23386 systems, do the operation on the containing aligned word and shift/mask
23387 to get the proper byte or halfword. */
23388 if (mode == QImode || mode == HImode)
23390 if (TARGET_SYNC_HI_QI)
23392 val = convert_modes (SImode, mode, val, 1);
23394 /* Prepare to adjust the return value. */
23395 before = gen_reg_rtx (SImode);
23396 if (after)
23397 after = gen_reg_rtx (SImode);
23398 mode = SImode;
23400 else
23402 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23404 /* Shift and mask VAL into position within the word. */
23405 val = convert_modes (SImode, mode, val, 1);
23406 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23407 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23409 switch (code)
23411 case IOR:
23412 case XOR:
23413 /* We've already zero-extended VAL. That is sufficient to
23414 make certain that it does not affect other bits. */
23415 mask = NULL;
23416 break;
23418 case AND:
23419 /* If we make certain that all of the other bits in VAL are
23420 set, that will be sufficient to not affect other bits. */
23421 x = gen_rtx_NOT (SImode, mask);
23422 x = gen_rtx_IOR (SImode, x, val);
23423 emit_insn (gen_rtx_SET (val, x));
23424 mask = NULL;
23425 break;
23427 case NOT:
23428 case PLUS:
23429 case MINUS:
23430 /* These will all affect bits outside the field and need
23431 adjustment via MASK within the loop. */
23432 break;
23434 default:
23435 gcc_unreachable ();
23438 /* Prepare to adjust the return value. */
23439 before = gen_reg_rtx (SImode);
23440 if (after)
23441 after = gen_reg_rtx (SImode);
23442 store_mode = mode = SImode;
23446 mem = rs6000_pre_atomic_barrier (mem, model);
23448 label = gen_label_rtx ();
23449 emit_label (label);
23450 label = gen_rtx_LABEL_REF (VOIDmode, label);
23452 if (before == NULL_RTX)
23453 before = gen_reg_rtx (mode);
23455 emit_load_locked (mode, before, mem);
23457 if (code == NOT)
23459 x = expand_simple_binop (mode, AND, before, val,
23460 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23461 after = expand_simple_unop (mode, NOT, x, after, 1);
23463 else
23465 after = expand_simple_binop (mode, code, before, val,
23466 after, 1, OPTAB_LIB_WIDEN);
23469 x = after;
23470 if (mask)
23472 x = expand_simple_binop (SImode, AND, after, mask,
23473 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23474 x = rs6000_mask_atomic_subword (before, x, mask);
23476 else if (store_mode != mode)
23477 x = convert_modes (store_mode, mode, x, 1);
23479 cond = gen_reg_rtx (CCmode);
23480 emit_store_conditional (store_mode, cond, mem, x);
23482 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23483 emit_unlikely_jump (x, label);
23485 rs6000_post_atomic_barrier (model);
23487 if (shift)
23489 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23490 then do the calculations in a SImode register. */
23491 if (orig_before)
23492 rs6000_finish_atomic_subword (orig_before, before, shift);
23493 if (orig_after)
23494 rs6000_finish_atomic_subword (orig_after, after, shift);
23496 else if (store_mode != mode)
23498 /* QImode/HImode on machines with lbarx/lharx where we do the native
23499 operation and then do the calculations in a SImode register. */
23500 if (orig_before)
23501 convert_move (orig_before, before, 1);
23502 if (orig_after)
23503 convert_move (orig_after, after, 1);
23505 else if (orig_after && after != orig_after)
23506 emit_move_insn (orig_after, after);
23509 /* Emit instructions to move SRC to DST. Called by splitters for
23510 multi-register moves. It will emit at most one instruction for
23511 each register that is accessed; that is, it won't emit li/lis pairs
23512 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23513 register. */
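/* For example (illustrative only), a TImode move between general
   registers on a 64-bit target has nregs == 2 and reg_mode == DImode,
   so it is split into two DImode register moves; when the source and
   destination ranges overlap destructively, the code below moves the
   registers in reverse order instead.  */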
23515 void
23516 rs6000_split_multireg_move (rtx dst, rtx src)
23518 /* The register number of the first register being moved. */
23519 int reg;
23520 /* The mode that is to be moved. */
23521 machine_mode mode;
23522 /* The mode that the move is being done in, and its size. */
23523 machine_mode reg_mode;
23524 int reg_mode_size;
23525 /* The number of registers that will be moved. */
23526 int nregs;
23528 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23529 mode = GET_MODE (dst);
23530 nregs = hard_regno_nregs (reg, mode);
23531 if (FP_REGNO_P (reg))
23532 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23533 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23534 else if (ALTIVEC_REGNO_P (reg))
23535 reg_mode = V16QImode;
23536 else
23537 reg_mode = word_mode;
23538 reg_mode_size = GET_MODE_SIZE (reg_mode);
23540 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23542 /* TDmode residing in FP registers is special, since the ISA requires that
23543 the lower-numbered word of a register pair is always the most significant
23544 word, even in little-endian mode. This does not match the usual subreg
23545 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23546 the appropriate constituent registers "by hand" in little-endian mode.
23548 Note we do not need to check for destructive overlap here since TDmode
23549 can only reside in even/odd register pairs. */
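/* Concretely (register numbers assumed for illustration), a TDmode
   value in the pair fr10:fr11 keeps its most significant word in fr10
   even on little-endian targets, whereas subreg word 0 of the other
   operand is its least significant word; hence the loop below pairs
   subreg offset 0 with register REGNO + nregs - 1, not with REGNO.  */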
23550 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23552 rtx p_src, p_dst;
23553 int i;
23555 for (i = 0; i < nregs; i++)
23557 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23558 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23559 else
23560 p_src = simplify_gen_subreg (reg_mode, src, mode,
23561 i * reg_mode_size);
23563 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23564 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23565 else
23566 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23567 i * reg_mode_size);
23569 emit_insn (gen_rtx_SET (p_dst, p_src));
23572 return;
23575 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23577 /* Move register range backwards, if we might have destructive
23578 overlap. */
23579 int i;
23580 for (i = nregs - 1; i >= 0; i--)
23581 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23582 i * reg_mode_size),
23583 simplify_gen_subreg (reg_mode, src, mode,
23584 i * reg_mode_size)));
23586 else
23588 int i;
23589 int j = -1;
23590 bool used_update = false;
23591 rtx restore_basereg = NULL_RTX;
23593 if (MEM_P (src) && INT_REGNO_P (reg))
23595 rtx breg;
23597 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23598 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23600 rtx delta_rtx;
23601 breg = XEXP (XEXP (src, 0), 0);
23602 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23603 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23604 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23605 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23606 src = replace_equiv_address (src, breg);
23608 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23610 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23612 rtx basereg = XEXP (XEXP (src, 0), 0);
23613 if (TARGET_UPDATE)
23615 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23616 emit_insn (gen_rtx_SET (ndst,
23617 gen_rtx_MEM (reg_mode,
23618 XEXP (src, 0))));
23619 used_update = true;
23621 else
23622 emit_insn (gen_rtx_SET (basereg,
23623 XEXP (XEXP (src, 0), 1)));
23624 src = replace_equiv_address (src, basereg);
23626 else
23628 rtx basereg = gen_rtx_REG (Pmode, reg);
23629 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23630 src = replace_equiv_address (src, basereg);
23634 breg = XEXP (src, 0);
23635 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23636 breg = XEXP (breg, 0);
23638 /* If the base register we are using to address memory is
23639 also a destination reg, then change that register last. */
23640 if (REG_P (breg)
23641 && REGNO (breg) >= REGNO (dst)
23642 && REGNO (breg) < REGNO (dst) + nregs)
23643 j = REGNO (breg) - REGNO (dst);
23645 else if (MEM_P (dst) && INT_REGNO_P (reg))
23647 rtx breg;
23649 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23650 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23652 rtx delta_rtx;
23653 breg = XEXP (XEXP (dst, 0), 0);
23654 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23655 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23656 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23658 /* We have to update the breg before doing the store.
23659 Use store with update, if available. */
23661 if (TARGET_UPDATE)
23663 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23664 emit_insn (TARGET_32BIT
23665 ? (TARGET_POWERPC64
23666 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23667 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23668 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23669 used_update = true;
23671 else
23672 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23673 dst = replace_equiv_address (dst, breg);
23675 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23676 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23678 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23680 rtx basereg = XEXP (XEXP (dst, 0), 0);
23681 if (TARGET_UPDATE)
23683 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23684 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23685 XEXP (dst, 0)),
23686 nsrc));
23687 used_update = true;
23689 else
23690 emit_insn (gen_rtx_SET (basereg,
23691 XEXP (XEXP (dst, 0), 1)));
23692 dst = replace_equiv_address (dst, basereg);
23694 else
23696 rtx basereg = XEXP (XEXP (dst, 0), 0);
23697 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23698 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23699 && REG_P (basereg)
23700 && REG_P (offsetreg)
23701 && REGNO (basereg) != REGNO (offsetreg));
23702 if (REGNO (basereg) == 0)
23704 rtx tmp = offsetreg;
23705 offsetreg = basereg;
23706 basereg = tmp;
23708 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23709 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23710 dst = replace_equiv_address (dst, basereg);
23713 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23714 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23717 for (i = 0; i < nregs; i++)
23719 /* Calculate index to next subword. */
23720 ++j;
23721 if (j == nregs)
23722 j = 0;
23724 /* If compiler already emitted move of first word by
23725 store with update, no need to do anything. */
23726 if (j == 0 && used_update)
23727 continue;
23729 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23730 j * reg_mode_size),
23731 simplify_gen_subreg (reg_mode, src, mode,
23732 j * reg_mode_size)));
23734 if (restore_basereg != NULL_RTX)
23735 emit_insn (restore_basereg);
23739 static GTY(()) alias_set_type set = -1;
23741 alias_set_type
23742 get_TOC_alias_set (void)
23744 if (set == -1)
23745 set = new_alias_set ();
23746 return set;
23749 /* Return the internal arg pointer used for function incoming
23750 arguments. When -fsplit-stack, the arg pointer is r12 so we need
23751 to copy it to a pseudo in order for it to be preserved over calls
23752 and suchlike. We'd really like to use a pseudo here for the
23753 internal arg pointer but data-flow analysis is not prepared to
23754 accept pseudos as live at the beginning of a function. */
23756 static rtx
23757 rs6000_internal_arg_pointer (void)
23759 if (flag_split_stack
23760 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
23761 == NULL))
23764 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
23766 rtx pat;
23768 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
23769 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
23771 /* Put the pseudo initialization right after the note at the
23772 beginning of the function. */
23773 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
23774 gen_rtx_REG (Pmode, 12));
23775 push_topmost_sequence ();
23776 emit_insn_after (pat, get_insns ());
23777 pop_topmost_sequence ();
23779 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
23780 FIRST_PARM_OFFSET (current_function_decl));
23781 return copy_to_reg (ret);
23783 return virtual_incoming_args_rtx;
23786 /* We may have to tell the dataflow pass that the split stack prologue
23787 is initializing a register. */
23789 static void
23790 rs6000_live_on_entry (bitmap regs)
23792 if (flag_split_stack)
23793 bitmap_set_bit (regs, 12);
23797 /* A C compound statement that outputs the assembler code for a thunk
23798 function, used to implement C++ virtual function calls with
23799 multiple inheritance. The thunk acts as a wrapper around a virtual
23800 function, adjusting the implicit object parameter before handing
23801 control off to the real function.
23803 First, emit code to add the integer DELTA to the location that
23804 contains the incoming first argument. Assume that this argument
23805 contains a pointer, and is the one used to pass the `this' pointer
23806 in C++. This is the incoming argument *before* the function
23807 prologue, e.g. `%o0' on a sparc. The addition must preserve the
23808 values of all other incoming arguments.
23810 After the addition, emit code to jump to FUNCTION, which is a
23811 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
23812 not touch the return address. Hence returning from FUNCTION will
23813 return to whoever called the current `thunk'.
23815 The effect must be as if FUNCTION had been called directly with the
23816 adjusted first argument. This macro is responsible for emitting
23817 all of the code for a thunk function; output_function_prologue()
23818 and output_function_epilogue() are not invoked.
23820 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
23821 been extracted from it.) It might possibly be useful on some
23822 targets, but probably not.
23824 If you do not define this macro, the target-independent code in the
23825 C++ frontend will generate a less efficient heavyweight thunk that
23826 calls FUNCTION instead of jumping to it. The generic approach does
23827 not support varargs. */
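/* As a hedged sketch of the output (the exact code depends on the ABI
   and on DELTA/VCALL_OFFSET), a thunk with a small positive DELTA and
   no vtable offset reduces to adjusting the `this' register and
   tail-jumping:

	addi 3,3,DELTA		# adjust the incoming `this' pointer
	b FUNCTION		# direct sibling jump; no return here  */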
23829 static void
23830 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
23831 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
23832 tree function)
23834 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
23835 rtx this_rtx, funexp;
23836 rtx_insn *insn;
23838 reload_completed = 1;
23839 epilogue_completed = 1;
23841 /* Mark the end of the (empty) prologue. */
23842 emit_note (NOTE_INSN_PROLOGUE_END);
23844 /* Find the "this" pointer. If the function returns a structure,
23845 the structure return pointer is in r3. */
23846 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
23847 this_rtx = gen_rtx_REG (Pmode, 4);
23848 else
23849 this_rtx = gen_rtx_REG (Pmode, 3);
23851 /* Apply the constant offset, if required. */
23852 if (delta)
23853 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
23855 /* Apply the offset from the vtable, if required. */
23856 if (vcall_offset)
23858 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
23859 rtx tmp = gen_rtx_REG (Pmode, 12);
23861 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
23862 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
23864 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
23865 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
23867 else
23869 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
23871 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
23873 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
23876 /* Generate a tail call to the target function. */
23877 if (!TREE_USED (function))
23879 assemble_external (function);
23880 TREE_USED (function) = 1;
23882 funexp = XEXP (DECL_RTL (function), 0);
23883 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
23885 #if TARGET_MACHO
23886 if (MACHOPIC_INDIRECT)
23887 funexp = machopic_indirect_call_target (funexp);
23888 #endif
23890 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
23891 generate sibcall RTL explicitly. */
23892 insn = emit_call_insn (
23893 gen_rtx_PARALLEL (VOIDmode,
23894 gen_rtvec (3,
23895 gen_rtx_CALL (VOIDmode,
23896 funexp, const0_rtx),
23897 gen_rtx_USE (VOIDmode, const0_rtx),
23898 simple_return_rtx)));
23899 SIBLING_CALL_P (insn) = 1;
23900 emit_barrier ();
23902 /* Run just enough of rest_of_compilation to get the insns emitted.
23903 There's not really enough bulk here to make other passes such as
23904 instruction scheduling worth while. */
23905 insn = get_insns ();
23906 shorten_branches (insn);
23907 assemble_start_function (thunk_fndecl, fnname);
23908 final_start_function (insn, file, 1);
23909 final (insn, file, 1);
23910 final_end_function ();
23911 assemble_end_function (thunk_fndecl, fnname);
23913 reload_completed = 0;
23914 epilogue_completed = 0;
23917 /* A quick summary of the various types of 'constant-pool tables'
23918 under PowerPC:
23920 Target Flags Name One table per
23921 AIX (none) AIX TOC object file
23922 AIX -mfull-toc AIX TOC object file
23923 AIX -mminimal-toc AIX minimal TOC translation unit
23924 SVR4/EABI (none) SVR4 SDATA object file
23925 SVR4/EABI -fpic SVR4 pic object file
23926 SVR4/EABI -fPIC SVR4 PIC translation unit
23927 SVR4/EABI -mrelocatable EABI TOC function
23928 SVR4/EABI -maix AIX TOC object file
23929 SVR4/EABI -maix -mminimal-toc
23930 AIX minimal TOC translation unit
23932 Name Reg. Set by entries contains:
23933 made by addrs? fp? sum?
23935 AIX TOC 2 crt0 as Y option option
23936 AIX minimal TOC 30 prolog gcc Y Y option
23937 SVR4 SDATA 13 crt0 gcc N Y N
23938 SVR4 pic 30 prolog ld Y not yet N
23939 SVR4 PIC 30 prolog gcc Y option option
23940 EABI TOC 30 prolog gcc Y option option
23944 /* Hash functions for the hash table. */
23946 static unsigned
23947 rs6000_hash_constant (rtx k)
23949 enum rtx_code code = GET_CODE (k);
23950 machine_mode mode = GET_MODE (k);
23951 unsigned result = (code << 3) ^ mode;
23952 const char *format;
23953 int flen, fidx;
23955 format = GET_RTX_FORMAT (code);
23956 flen = strlen (format);
23957 fidx = 0;
23959 switch (code)
23961 case LABEL_REF:
23962 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
23964 case CONST_WIDE_INT:
23966 int i;
23967 flen = CONST_WIDE_INT_NUNITS (k);
23968 for (i = 0; i < flen; i++)
23969 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
23970 return result;
23973 case CONST_DOUBLE:
23974 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
23976 case CODE_LABEL:
23977 fidx = 3;
23978 break;
23980 default:
23981 break;
23984 for (; fidx < flen; fidx++)
23985 switch (format[fidx])
23987 case 's':
23989 unsigned i, len;
23990 const char *str = XSTR (k, fidx);
23991 len = strlen (str);
23992 result = result * 613 + len;
23993 for (i = 0; i < len; i++)
23994 result = result * 613 + (unsigned) str[i];
23995 break;
23997 case 'u':
23998 case 'e':
23999 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
24000 break;
24001 case 'i':
24002 case 'n':
24003 result = result * 613 + (unsigned) XINT (k, fidx);
24004 break;
24005 case 'w':
24006 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
24007 result = result * 613 + (unsigned) XWINT (k, fidx);
24008 else
24010 size_t i;
24011 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
24012 result = result * 613 + (unsigned) (XWINT (k, fidx)
24013 >> CHAR_BIT * i);
24015 break;
24016 case '0':
24017 break;
24018 default:
24019 gcc_unreachable ();
24022 return result;
24025 hashval_t
24026 toc_hasher::hash (toc_hash_struct *thc)
24028 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
24031 /* Compare H1 and H2 for equivalence. */
24033 bool
24034 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
24036 rtx r1 = h1->key;
24037 rtx r2 = h2->key;
24039 if (h1->key_mode != h2->key_mode)
24040 return 0;
24042 return rtx_equal_p (r1, r2);
24045 /* These are the names given by the C++ front-end to vtables, and
24046 vtable-like objects. Ideally, this logic should not be here;
24047 instead, there should be some programmatic way of inquiring as
24048 to whether or not an object is a vtable. */
24050 #define VTABLE_NAME_P(NAME) \
24051 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
24052 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
24053 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
24054 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
24055 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
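/* For instance, the Itanium C++ ABI mangles the vtable of class Foo
   as "_ZTV3Foo", which the macro above accepts, while an ordinary
   function symbol such as "_Z3barv" is rejected.  */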
24057 #ifdef NO_DOLLAR_IN_LABEL
24058 /* Return a GGC-allocated character string translating dollar signs in
24059 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
24061 const char *
24062 rs6000_xcoff_strip_dollar (const char *name)
24064 char *strip, *p;
24065 const char *q;
24066 size_t len;
24068 q = (const char *) strchr (name, '$');
24070 if (q == 0 || q == name)
24071 return name;
24073 len = strlen (name);
24074 strip = XALLOCAVEC (char, len + 1);
24075 strcpy (strip, name);
24076 p = strip + (q - name);
24077 while (p)
24079 *p = '_';
24080 p = strchr (p + 1, '$');
24083 return ggc_alloc_string (strip, len);
24085 #endif
24087 void
24088 rs6000_output_symbol_ref (FILE *file, rtx x)
24090 const char *name = XSTR (x, 0);
24092 /* Currently C++ toc references to vtables can be emitted before it
24093 is decided whether the vtable is public or private. If this is
24094 the case, then the linker will eventually complain that there is
24095 a reference to an unknown section. Thus, for vtables only,
24096 we emit the TOC reference to reference the identifier and not the
24097 symbol. */
24098 if (VTABLE_NAME_P (name))
24100 RS6000_OUTPUT_BASENAME (file, name);
24102 else
24103 assemble_name (file, name);
24106 /* Output a TOC entry. We derive the entry name from what is being
24107 written. */
24109 void
24110 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
24112 char buf[256];
24113 const char *name = buf;
24114 rtx base = x;
24115 HOST_WIDE_INT offset = 0;
24117 gcc_assert (!TARGET_NO_TOC);
24119 /* When the linker won't eliminate them, don't output duplicate
24120 TOC entries (this happens on AIX if there is any kind of TOC,
24121 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
24122 CODE_LABELs. */
24123 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
24125 struct toc_hash_struct *h;
24127 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
24128 time because GGC is not initialized at that point. */
24129 if (toc_hash_table == NULL)
24130 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
24132 h = ggc_alloc<toc_hash_struct> ();
24133 h->key = x;
24134 h->key_mode = mode;
24135 h->labelno = labelno;
24137 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
24138 if (*found == NULL)
24139 *found = h;
24140 else /* This is indeed a duplicate.
24141 Set this label equal to that label. */
24143 fputs ("\t.set ", file);
24144 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24145 fprintf (file, "%d,", labelno);
24146 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24147 fprintf (file, "%d\n", ((*found)->labelno));
24149 #ifdef HAVE_AS_TLS
24150 if (TARGET_XCOFF && SYMBOL_REF_P (x)
24151 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
24152 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
24154 fputs ("\t.set ", file);
24155 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24156 fprintf (file, "%d,", labelno);
24157 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24158 fprintf (file, "%d\n", ((*found)->labelno));
24160 #endif
24161 return;
24165 /* If we're going to put a double constant in the TOC, make sure it's
24166 aligned properly when strict alignment is on. */
24167 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
24168 && STRICT_ALIGNMENT
24169 && GET_MODE_BITSIZE (mode) >= 64
24170 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
24171 ASM_OUTPUT_ALIGN (file, 3);
24174 (*targetm.asm_out.internal_label) (file, "LC", labelno);
24176 /* Handle FP constants specially. Note that if we have a minimal
24177 TOC, things we put here aren't actually in the TOC, so we can allow
24178 FP constants. */
24179 if (CONST_DOUBLE_P (x)
24180 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
24181 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
24183 long k[4];
24185 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24186 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
24187 else
24188 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24190 if (TARGET_64BIT)
24192 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24193 fputs (DOUBLE_INT_ASM_OP, file);
24194 else
24195 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24196 k[0] & 0xffffffff, k[1] & 0xffffffff,
24197 k[2] & 0xffffffff, k[3] & 0xffffffff);
24198 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
24199 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24200 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
24201 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
24202 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
24203 return;
24205 else
24207 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24208 fputs ("\t.long ", file);
24209 else
24210 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24211 k[0] & 0xffffffff, k[1] & 0xffffffff,
24212 k[2] & 0xffffffff, k[3] & 0xffffffff);
24213 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
24214 k[0] & 0xffffffff, k[1] & 0xffffffff,
24215 k[2] & 0xffffffff, k[3] & 0xffffffff);
24216 return;
24219 else if (CONST_DOUBLE_P (x)
24220 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
24222 long k[2];
24224 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24225 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
24226 else
24227 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24229 if (TARGET_64BIT)
24231 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24232 fputs (DOUBLE_INT_ASM_OP, file);
24233 else
24234 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24235 k[0] & 0xffffffff, k[1] & 0xffffffff);
24236 fprintf (file, "0x%lx%08lx\n",
24237 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24238 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
24239 return;
24241 else
24243 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24244 fputs ("\t.long ", file);
24245 else
24246 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24247 k[0] & 0xffffffff, k[1] & 0xffffffff);
24248 fprintf (file, "0x%lx,0x%lx\n",
24249 k[0] & 0xffffffff, k[1] & 0xffffffff);
24250 return;
24253 else if (CONST_DOUBLE_P (x)
24254 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
24256 long l;
24258 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24259 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
24260 else
24261 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
24263 if (TARGET_64BIT)
24265 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24266 fputs (DOUBLE_INT_ASM_OP, file);
24267 else
24268 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24269 if (WORDS_BIG_ENDIAN)
24270 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
24271 else
24272 fprintf (file, "0x%lx\n", l & 0xffffffff);
24273 return;
24275 else
24277 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24278 fputs ("\t.long ", file);
24279 else
24280 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24281 fprintf (file, "0x%lx\n", l & 0xffffffff);
24282 return;
24285 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
24287 unsigned HOST_WIDE_INT low;
24288 HOST_WIDE_INT high;
24290 low = INTVAL (x) & 0xffffffff;
24291 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
24293 /* TOC entries are always Pmode-sized, so on big-endian targets
24294 smaller integer constants in the TOC need to be padded.
24295 (This is still a win over putting the constants in
24296 a separate constant pool, because then we'd have
24297 to have both a TOC entry _and_ the actual constant.)
24299 For a 32-bit target, CONST_INT values are loaded and shifted
24300 entirely within `low' and can be stored in one TOC entry. */
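/* As a worked example (values assumed), a HImode constant 0x1234 on a
   64-bit big-endian target is shifted into the most significant bits
   of the Pmode word by the code below, so the TOC entry is emitted as
   0x1234000000000000.  */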
24302 /* It would be easy to make this work, but it doesn't now. */
24303 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
24305 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
24307 low |= high << 32;
24308 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
24309 high = (HOST_WIDE_INT) low >> 32;
24310 low &= 0xffffffff;
24313 if (TARGET_64BIT)
24315 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24316 fputs (DOUBLE_INT_ASM_OP, file);
24317 else
24318 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24319 (long) high & 0xffffffff, (long) low & 0xffffffff);
24320 fprintf (file, "0x%lx%08lx\n",
24321 (long) high & 0xffffffff, (long) low & 0xffffffff);
24322 return;
24324 else
24326 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
24328 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24329 fputs ("\t.long ", file);
24330 else
24331 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24332 (long) high & 0xffffffff, (long) low & 0xffffffff);
24333 fprintf (file, "0x%lx,0x%lx\n",
24334 (long) high & 0xffffffff, (long) low & 0xffffffff);
24336 else
24338 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24339 fputs ("\t.long ", file);
24340 else
24341 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
24342 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
24344 return;
24348 if (GET_CODE (x) == CONST)
24350 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
24351 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
24353 base = XEXP (XEXP (x, 0), 0);
24354 offset = INTVAL (XEXP (XEXP (x, 0), 1));
24357 switch (GET_CODE (base))
24359 case SYMBOL_REF:
24360 name = XSTR (base, 0);
24361 break;
24363 case LABEL_REF:
24364 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
24365 CODE_LABEL_NUMBER (XEXP (base, 0)));
24366 break;
24368 case CODE_LABEL:
24369 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
24370 break;
24372 default:
24373 gcc_unreachable ();
24376 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24377 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
24378 else
24380 fputs ("\t.tc ", file);
24381 RS6000_OUTPUT_BASENAME (file, name);
24383 if (offset < 0)
24384 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
24385 else if (offset)
24386 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
24388 /* Mark large TOC symbols on AIX with [TE] so they are mapped
24389 after other TOC symbols, reducing overflow of small TOC access
24390 to [TC] symbols. */
24391 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
24392 ? "[TE]," : "[TC],", file);
24395 /* Currently C++ toc references to vtables can be emitted before it
24396 is decided whether the vtable is public or private. If this is
24397 the case, then the linker will eventually complain that there is
24398 a TOC reference to an unknown section. Thus, for vtables only,
24399 we emit the TOC reference to reference the symbol and not the
24400 section. */
24401 if (VTABLE_NAME_P (name))
24403 RS6000_OUTPUT_BASENAME (file, name);
24404 if (offset < 0)
24405 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
24406 else if (offset > 0)
24407 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
24409 else
24410 output_addr_const (file, x);
24412 #if HAVE_AS_TLS
24413 if (TARGET_XCOFF && SYMBOL_REF_P (base))
24415 switch (SYMBOL_REF_TLS_MODEL (base))
24417 case 0:
24418 break;
24419 case TLS_MODEL_LOCAL_EXEC:
24420 fputs ("@le", file);
24421 break;
24422 case TLS_MODEL_INITIAL_EXEC:
24423 fputs ("@ie", file);
24424 break;
24425 /* Use global-dynamic for local-dynamic. */
24426 case TLS_MODEL_GLOBAL_DYNAMIC:
24427 case TLS_MODEL_LOCAL_DYNAMIC:
24428 putc ('\n', file);
24429 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
24430 fputs ("\t.tc .", file);
24431 RS6000_OUTPUT_BASENAME (file, name);
24432 fputs ("[TC],", file);
24433 output_addr_const (file, x);
24434 fputs ("@m", file);
24435 break;
24436 default:
24437 gcc_unreachable ();
24440 #endif
24442 putc ('\n', file);
24445 /* Output an assembler pseudo-op to write an ASCII string of N characters
24446 starting at P to FILE.
24448 On the RS/6000, we have to do this using the .byte operation and
24449 write out special characters outside the quoted string.
24450 Also, the assembler is broken; very long strings are truncated,
24451 so we must artificially break them up early. */
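/* For example (illustrative), output_ascii for the three bytes
   "Hi\n" emits:

	.byte "Hi"
	.byte 10

   keeping printable runs quoted and writing the rest as decimal.  */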
24453 void
24454 output_ascii (FILE *file, const char *p, int n)
24456 char c;
24457 int i, count_string;
24458 const char *for_string = "\t.byte \"";
24459 const char *for_decimal = "\t.byte ";
24460 const char *to_close = NULL;
24462 count_string = 0;
24463 for (i = 0; i < n; i++)
24465 c = *p++;
24466 if (c >= ' ' && c < 0177)
24468 if (for_string)
24469 fputs (for_string, file);
24470 putc (c, file);
24472 /* Write two quotes to get one. */
24473 if (c == '"')
24475 putc (c, file);
24476 ++count_string;
24479 for_string = NULL;
24480 for_decimal = "\"\n\t.byte ";
24481 to_close = "\"\n";
24482 ++count_string;
24484 if (count_string >= 512)
24486 fputs (to_close, file);
24488 for_string = "\t.byte \"";
24489 for_decimal = "\t.byte ";
24490 to_close = NULL;
24491 count_string = 0;
24494 else
24496 if (for_decimal)
24497 fputs (for_decimal, file);
24498 fprintf (file, "%d", c);
24500 for_string = "\n\t.byte \"";
24501 for_decimal = ", ";
24502 to_close = "\n";
24503 count_string = 0;
24507 /* Now close the string if we have written one. Then end the line. */
24508 if (to_close)
24509 fputs (to_close, file);
24512 /* Generate a unique section name for FILENAME for a section type
24513 represented by SECTION_DESC. Output goes into BUF.
24515 SECTION_DESC can be any string, as long as it is different for each
24516 possible section type.
24518 We name the section in the same manner as xlc. The name begins with an
24519 underscore followed by the filename (after stripping any leading directory
24520 names) with the last period replaced by the string SECTION_DESC. If
24521 FILENAME does not contain a period, SECTION_DESC is appended to the end of
24522 the name. */
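/* For example (an assumed illustration), FILENAME "src/foo.c" with
   SECTION_DESC "bss_" yields the section name "_foobss_": the
   directory prefix is stripped and the final period is replaced by
   the descriptor.  */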
24524 void
24525 rs6000_gen_section_name (char **buf, const char *filename,
24526 const char *section_desc)
24528 const char *q, *after_last_slash, *last_period = 0;
24529 char *p;
24530 int len;
24532 after_last_slash = filename;
24533 for (q = filename; *q; q++)
24535 if (*q == '/')
24536 after_last_slash = q + 1;
24537 else if (*q == '.')
24538 last_period = q;
24541 len = strlen (after_last_slash) + strlen (section_desc) + 2;
24542 *buf = (char *) xmalloc (len);
24544 p = *buf;
24545 *p++ = '_';
24547 for (q = after_last_slash; *q; q++)
24549 if (q == last_period)
24551 strcpy (p, section_desc);
24552 p += strlen (section_desc);
24553 break;
24556 else if (ISALNUM (*q))
24557 *p++ = *q;
24560 if (last_period == 0)
24561 strcpy (p, section_desc);
24562 else
24563 *p = '\0';
24566 /* Emit profile function. */
24568 void
24569 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
24571 /* Non-standard profiling for kernels, which just saves LR then calls
24572 _mcount without worrying about arg saves. The idea is to change
24573 the function prologue as little as possible as it isn't easy to
24574 account for arg save/restore code added just for _mcount. */
24575 if (TARGET_PROFILE_KERNEL)
24576 return;
24578 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24580 #ifndef NO_PROFILE_COUNTERS
24581 # define NO_PROFILE_COUNTERS 0
24582 #endif
24583 if (NO_PROFILE_COUNTERS)
24584 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24585 LCT_NORMAL, VOIDmode);
24586 else
24588 char buf[30];
24589 const char *label_name;
24590 rtx fun;
24592 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24593 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
24594 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
24596 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24597 LCT_NORMAL, VOIDmode, fun, Pmode);
24600 else if (DEFAULT_ABI == ABI_DARWIN)
24602 const char *mcount_name = RS6000_MCOUNT;
24603 int caller_addr_regno = LR_REGNO;
24605 /* Be conservative and always set this, at least for now. */
24606 crtl->uses_pic_offset_table = 1;
24608 #if TARGET_MACHO
24609 /* For PIC code, set up a stub and collect the caller's address
24610 from r0, which is where the prologue puts it. */
24611 if (MACHOPIC_INDIRECT
24612 && crtl->uses_pic_offset_table)
24613 caller_addr_regno = 0;
24614 #endif
24615 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
24616 LCT_NORMAL, VOIDmode,
24617 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
24621 /* Write function profiler code. */
24623 void
24624 output_function_profiler (FILE *file, int labelno)
24626 char buf[100];
24628 switch (DEFAULT_ABI)
24630 default:
24631 gcc_unreachable ();
24633 case ABI_V4:
24634 if (!TARGET_32BIT)
24636 warning (0, "no profiling of 64-bit code for this ABI");
24637 return;
24639 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24640 fprintf (file, "\tmflr %s\n", reg_names[0]);
24641 if (NO_PROFILE_COUNTERS)
24643 asm_fprintf (file, "\tstw %s,4(%s)\n",
24644 reg_names[0], reg_names[1]);
24646 else if (TARGET_SECURE_PLT && flag_pic)
24648 if (TARGET_LINK_STACK)
24650 char name[32];
24651 get_ppc476_thunk_name (name);
24652 asm_fprintf (file, "\tbl %s\n", name);
24654 else
24655 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
24656 asm_fprintf (file, "\tstw %s,4(%s)\n",
24657 reg_names[0], reg_names[1]);
24658 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24659 asm_fprintf (file, "\taddis %s,%s,",
24660 reg_names[12], reg_names[12]);
24661 assemble_name (file, buf);
24662 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
24663 assemble_name (file, buf);
24664 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
24666 else if (flag_pic == 1)
24668 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
24669 asm_fprintf (file, "\tstw %s,4(%s)\n",
24670 reg_names[0], reg_names[1]);
24671 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24672 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
24673 assemble_name (file, buf);
24674 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
24676 else if (flag_pic > 1)
24678 asm_fprintf (file, "\tstw %s,4(%s)\n",
24679 reg_names[0], reg_names[1]);
24680 /* Now, we need to get the address of the label. */
24681 if (TARGET_LINK_STACK)
24683 char name[32];
24684 get_ppc476_thunk_name (name);
24685 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
24686 assemble_name (file, buf);
24687 fputs ("-.\n1:", file);
24688 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24689 asm_fprintf (file, "\taddi %s,%s,4\n",
24690 reg_names[11], reg_names[11]);
24692 else
24694 fputs ("\tbcl 20,31,1f\n\t.long ", file);
24695 assemble_name (file, buf);
24696 fputs ("-.\n1:", file);
24697 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24699 asm_fprintf (file, "\tlwz %s,0(%s)\n",
24700 reg_names[0], reg_names[11]);
24701 asm_fprintf (file, "\tadd %s,%s,%s\n",
24702 reg_names[0], reg_names[0], reg_names[11]);
24704 else
24706 asm_fprintf (file, "\tlis %s,", reg_names[12]);
24707 assemble_name (file, buf);
24708 fputs ("@ha\n", file);
24709 asm_fprintf (file, "\tstw %s,4(%s)\n",
24710 reg_names[0], reg_names[1]);
24711 asm_fprintf (file, "\tla %s,", reg_names[0]);
24712 assemble_name (file, buf);
24713 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
24716 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
24717 fprintf (file, "\tbl %s%s\n",
24718 RS6000_MCOUNT, flag_pic ? "@plt" : "");
24719 break;
24721 case ABI_AIX:
24722 case ABI_ELFv2:
24723 case ABI_DARWIN:
24724 /* Don't do anything, done in output_profile_hook (). */
24725 break;
24731 /* The following variable holds the last issued insn. */
24733 static rtx_insn *last_scheduled_insn;
24735 /* The following variable helps to balance issuing of load and
24736 store instructions. */
24738 static int load_store_pendulum;
24740 /* The following variable helps pair divide insns during scheduling. */
24741 static int divide_cnt;
24742 /* The following variable helps pair and alternate vector and vector load
24743 insns during scheduling. */
24744 static int vec_pairing;
24747 /* Power4 load update and store update instructions are cracked into a
24748 load or store and an integer insn which are executed in the same cycle.
24749 Branches have their own dispatch slot which does not count against the
24750 GCC issue rate, but it changes the program flow so there are no other
24751 instructions to issue in this cycle. */
24753 static int
24754 rs6000_variable_issue_1 (rtx_insn *insn, int more)
24756 last_scheduled_insn = insn;
24757 if (GET_CODE (PATTERN (insn)) == USE
24758 || GET_CODE (PATTERN (insn)) == CLOBBER)
24760 cached_can_issue_more = more;
24761 return cached_can_issue_more;
24764 if (insn_terminates_group_p (insn, current_group))
24766 cached_can_issue_more = 0;
24767 return cached_can_issue_more;
24770 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
24771 if (recog_memoized (insn) < 0)
24772 return more;
24774 if (rs6000_sched_groups)
24776 if (is_microcoded_insn (insn))
24777 cached_can_issue_more = 0;
24778 else if (is_cracked_insn (insn))
24779 cached_can_issue_more = more > 2 ? more - 2 : 0;
24780 else
24781 cached_can_issue_more = more - 1;
24783 return cached_can_issue_more;
24786 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
24787 return 0;
24789 cached_can_issue_more = more - 1;
24790 return cached_can_issue_more;
24793 static int
24794 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
24796 int r = rs6000_variable_issue_1 (insn, more);
24797 if (verbose)
24798 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
24799 return r;
24802 /* Adjust the cost of a scheduling dependency. Return the new cost of
24803 a dependency of kind DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
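/* A small illustration of the effect: on the processors listed under
   TYPE_BRANCH below, a compare feeding its dependent conditional
   branch reports cost + 2, nudging the scheduler to separate the pair
   and reduce exposure to expensive mispredicts.  */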
24805 static int
24806 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
24807 unsigned int)
24809 enum attr_type attr_type;
24811 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
24812 return cost;
24814 switch (dep_type)
24816 case REG_DEP_TRUE:
24818 /* Data dependency; DEP_INSN writes a register that INSN reads
24819 some cycles later. */
24821 /* Separate a load from a narrower, dependent store. */
24822 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9
24823 || rs6000_tune == PROCESSOR_FUTURE)
24824 && GET_CODE (PATTERN (insn)) == SET
24825 && GET_CODE (PATTERN (dep_insn)) == SET
24826 && MEM_P (XEXP (PATTERN (insn), 1))
24827 && MEM_P (XEXP (PATTERN (dep_insn), 0))
24828 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
24829 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
24830 return cost + 14;
24832 attr_type = get_attr_type (insn);
24834 switch (attr_type)
24836 case TYPE_JMPREG:
24837 /* Tell the first scheduling pass about the latency between
24838 a mtctr and bctr (and mtlr and br/blr). The first
24839 scheduling pass will not know about this latency since
24840 the mtctr instruction, which has the latency associated
24841 to it, will be generated by reload. */
24842 return 4;
24843 case TYPE_BRANCH:
24844 /* Leave some extra cycles between a compare and its
24845 dependent branch, to inhibit expensive mispredicts. */
24846 if ((rs6000_tune == PROCESSOR_PPC603
24847 || rs6000_tune == PROCESSOR_PPC604
24848 || rs6000_tune == PROCESSOR_PPC604e
24849 || rs6000_tune == PROCESSOR_PPC620
24850 || rs6000_tune == PROCESSOR_PPC630
24851 || rs6000_tune == PROCESSOR_PPC750
24852 || rs6000_tune == PROCESSOR_PPC7400
24853 || rs6000_tune == PROCESSOR_PPC7450
24854 || rs6000_tune == PROCESSOR_PPCE5500
24855 || rs6000_tune == PROCESSOR_PPCE6500
24856 || rs6000_tune == PROCESSOR_POWER4
24857 || rs6000_tune == PROCESSOR_POWER5
24858 || rs6000_tune == PROCESSOR_POWER7
24859 || rs6000_tune == PROCESSOR_POWER8
24860 || rs6000_tune == PROCESSOR_POWER9
24861 || rs6000_tune == PROCESSOR_FUTURE
24862 || rs6000_tune == PROCESSOR_CELL)
24863 && recog_memoized (dep_insn)
24864 && (INSN_CODE (dep_insn) >= 0))
24866 switch (get_attr_type (dep_insn))
24868 case TYPE_CMP:
24869 case TYPE_FPCOMPARE:
24870 case TYPE_CR_LOGICAL:
24871 return cost + 2;
24872 case TYPE_EXTS:
24873 case TYPE_MUL:
24874 if (get_attr_dot (dep_insn) == DOT_YES)
24875 return cost + 2;
24876 else
24877 break;
24878 case TYPE_SHIFT:
24879 if (get_attr_dot (dep_insn) == DOT_YES
24880 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
24881 return cost + 2;
24882 else
24883 break;
24884 default:
24885 break;
24887 break;
24889 case TYPE_STORE:
24890 case TYPE_FPSTORE:
24891 if ((rs6000_tune == PROCESSOR_POWER6)
24892 && recog_memoized (dep_insn)
24893 && (INSN_CODE (dep_insn) >= 0))
24896 if (GET_CODE (PATTERN (insn)) != SET)
24897 /* If this happens, we have to extend this to schedule
24898 optimally. Return default for now. */
24899 return cost;
24901 /* Adjust the cost for the case where the value written
24902 by a fixed point operation is used as the address
24903 gen value on a store. */
24904 switch (get_attr_type (dep_insn))
24906 case TYPE_LOAD:
24907 case TYPE_CNTLZ:
24909 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24910 return get_attr_sign_extend (dep_insn)
24911 == SIGN_EXTEND_YES ? 6 : 4;
24912 break;
24914 case TYPE_SHIFT:
24916 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24917 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24918 6 : 3;
24919 break;
24921 case TYPE_INTEGER:
24922 case TYPE_ADD:
24923 case TYPE_LOGICAL:
24924 case TYPE_EXTS:
24925 case TYPE_INSERT:
24927 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24928 return 3;
24929 break;
24931 case TYPE_STORE:
24932 case TYPE_FPLOAD:
24933 case TYPE_FPSTORE:
24935 if (get_attr_update (dep_insn) == UPDATE_YES
24936 && ! rs6000_store_data_bypass_p (dep_insn, insn))
24937 return 3;
24938 break;
24940 case TYPE_MUL:
24942 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24943 return 17;
24944 break;
24946 case TYPE_DIV:
24948 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24949 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
24950 break;
24952 default:
24953 break;
24956 break;
24958 case TYPE_LOAD:
24959 if ((rs6000_tune == PROCESSOR_POWER6)
24960 && recog_memoized (dep_insn)
24961 && (INSN_CODE (dep_insn) >= 0))
24964 /* Adjust the cost for the case where the value written
24965 by a fixed point instruction is used within the address
24966 gen portion of a subsequent load(u)(x) */
24967 switch (get_attr_type (dep_insn))
24969 case TYPE_LOAD:
24970 case TYPE_CNTLZ:
24972 if (set_to_load_agen (dep_insn, insn))
24973 return get_attr_sign_extend (dep_insn)
24974 == SIGN_EXTEND_YES ? 6 : 4;
24975 break;
24977 case TYPE_SHIFT:
24979 if (set_to_load_agen (dep_insn, insn))
24980 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24981 6 : 3;
24982 break;
24984 case TYPE_INTEGER:
24985 case TYPE_ADD:
24986 case TYPE_LOGICAL:
24987 case TYPE_EXTS:
24988 case TYPE_INSERT:
24990 if (set_to_load_agen (dep_insn, insn))
24991 return 3;
24992 break;
24994 case TYPE_STORE:
24995 case TYPE_FPLOAD:
24996 case TYPE_FPSTORE:
24998 if (get_attr_update (dep_insn) == UPDATE_YES
24999 && set_to_load_agen (dep_insn, insn))
25000 return 3;
25001 break;
25003 case TYPE_MUL:
25005 if (set_to_load_agen (dep_insn, insn))
25006 return 17;
25007 break;
25009 case TYPE_DIV:
25011 if (set_to_load_agen (dep_insn, insn))
25012 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
25013 break;
25015 default:
25016 break;
25019 break;
25021 case TYPE_FPLOAD:
25022 if ((rs6000_tune == PROCESSOR_POWER6)
25023 && get_attr_update (insn) == UPDATE_NO
25024 && recog_memoized (dep_insn)
25025 && (INSN_CODE (dep_insn) >= 0)
25026 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
25027 return 2;
25029 default:
25030 break;
25033 /* Fall out to return default cost. */
25035 break;
25037 case REG_DEP_OUTPUT:
25038 /* Output dependency; DEP_INSN writes a register that INSN writes some
25039 cycles later. */
25040 if ((rs6000_tune == PROCESSOR_POWER6)
25041 && recog_memoized (dep_insn)
25042 && (INSN_CODE (dep_insn) >= 0))
25044 attr_type = get_attr_type (insn);
25046 switch (attr_type)
25048 case TYPE_FP:
25049 case TYPE_FPSIMPLE:
25050 if (get_attr_type (dep_insn) == TYPE_FP
25051 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
25052 return 1;
25053 break;
25054 case TYPE_FPLOAD:
25055 if (get_attr_update (insn) == UPDATE_NO
25056 && get_attr_type (dep_insn) == TYPE_MFFGPR)
25057 return 2;
25058 break;
25059 default:
25060 break;
25063 /* Fall through, no cost for output dependency. */
25064 /* FALLTHRU */
25066 case REG_DEP_ANTI:
25067 /* Anti dependency; DEP_INSN reads a register that INSN writes some
25068 cycles later. */
25069 return 0;
25071 default:
25072 gcc_unreachable ();
25075 return cost;
25078 /* Debug version of rs6000_adjust_cost. */
25080 static int
25081 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
25082 int cost, unsigned int dw)
25084 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
25086 if (ret != cost)
25088 const char *dep;
25090 switch (dep_type)
25092 default: dep = "unknown dependency"; break;
25093 case REG_DEP_TRUE: dep = "data dependency"; break;
25094 case REG_DEP_OUTPUT: dep = "output dependency"; break;
25095 case REG_DEP_ANTI: dep = "anti dependency"; break;
25098 fprintf (stderr,
25099 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
25100 "%s, insn:\n", ret, cost, dep);
25102 debug_rtx (insn);
25105 return ret;
25108 /* Return true if INSN is microcoded.
25109 Return false otherwise. */
25111 static bool
25112 is_microcoded_insn (rtx_insn *insn)
25114 if (!insn || !NONDEBUG_INSN_P (insn)
25115 || GET_CODE (PATTERN (insn)) == USE
25116 || GET_CODE (PATTERN (insn)) == CLOBBER)
25117 return false;
25119 if (rs6000_tune == PROCESSOR_CELL)
25120 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
25122 if (rs6000_sched_groups
25123 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25125 enum attr_type type = get_attr_type (insn);
25126 if ((type == TYPE_LOAD
25127 && get_attr_update (insn) == UPDATE_YES
25128 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
25129 || ((type == TYPE_LOAD || type == TYPE_STORE)
25130 && get_attr_update (insn) == UPDATE_YES
25131 && get_attr_indexed (insn) == INDEXED_YES)
25132 || type == TYPE_MFCR)
25133 return true;
25136 return false;
25139 /* The function returns true if INSN is cracked into 2 instructions
25140 by the processor (and therefore occupies 2 issue slots). */
25142 static bool
25143 is_cracked_insn (rtx_insn *insn)
25145 if (!insn || !NONDEBUG_INSN_P (insn)
25146 || GET_CODE (PATTERN (insn)) == USE
25147 || GET_CODE (PATTERN (insn)) == CLOBBER)
25148 return false;
25150 if (rs6000_sched_groups
25151 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25153 enum attr_type type = get_attr_type (insn);
25154 if ((type == TYPE_LOAD
25155 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
25156 && get_attr_update (insn) == UPDATE_NO)
25157 || (type == TYPE_LOAD
25158 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
25159 && get_attr_update (insn) == UPDATE_YES
25160 && get_attr_indexed (insn) == INDEXED_NO)
25161 || (type == TYPE_STORE
25162 && get_attr_update (insn) == UPDATE_YES
25163 && get_attr_indexed (insn) == INDEXED_NO)
25164 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
25165 && get_attr_update (insn) == UPDATE_YES)
25166 || (type == TYPE_CR_LOGICAL
25167 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
25168 || (type == TYPE_EXTS
25169 && get_attr_dot (insn) == DOT_YES)
25170 || (type == TYPE_SHIFT
25171 && get_attr_dot (insn) == DOT_YES
25172 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
25173 || (type == TYPE_MUL
25174 && get_attr_dot (insn) == DOT_YES)
25175 || type == TYPE_DIV
25176 || (type == TYPE_INSERT
25177 && get_attr_size (insn) == SIZE_32))
25178 return true;
25181 return false;
25184 /* The function returns true if INSN can be issued only from
25185 the branch slot. */
25187 static bool
25188 is_branch_slot_insn (rtx_insn *insn)
25190 if (!insn || !NONDEBUG_INSN_P (insn)
25191 || GET_CODE (PATTERN (insn)) == USE
25192 || GET_CODE (PATTERN (insn)) == CLOBBER)
25193 return false;
25195 if (rs6000_sched_groups)
25197 enum attr_type type = get_attr_type (insn);
25198 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
25199 return true;
25200 return false;
25203 return false;
25206 /* Return true if OUT_INSN sets a value that is
25207 used in the address generation computation of IN_INSN. */
25208 static bool
25209 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
25211 rtx out_set, in_set;
25213 /* For performance reasons, only handle the simple case where
25214 both loads are a single_set. */
25215 out_set = single_set (out_insn);
25216 if (out_set)
25218 in_set = single_set (in_insn);
25219 if (in_set)
25220 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
25223 return false;
25226 /* Try to determine the base, offset and size parts of the given MEM.
25227 Return true if successful, false if any of the values could not
25228 be determined.
25230 This function only looks for REG or REG+CONST address forms.
25231 The REG+REG address form will return false. */
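/* For instance (illustrative rtl), an 8-byte reference such as
   (mem:DI (plus:DI (reg:DI 9) (const_int 16))) yields
   *base = (reg:DI 9), *offset = 16 and *size = 8, whereas an indexed
   (reg + reg) address makes the function return false.  */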
25233 static bool
25234 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
25235 HOST_WIDE_INT *size)
25237 rtx addr_rtx;
25238 if (MEM_SIZE_KNOWN_P (mem))
25239 *size = MEM_SIZE (mem);
25240 else
25241 return false;
25243 addr_rtx = (XEXP (mem, 0));
25244 if (GET_CODE (addr_rtx) == PRE_MODIFY)
25245 addr_rtx = XEXP (addr_rtx, 1);
25247 *offset = 0;
25248 while (GET_CODE (addr_rtx) == PLUS
25249 && CONST_INT_P (XEXP (addr_rtx, 1)))
25251 *offset += INTVAL (XEXP (addr_rtx, 1));
25252 addr_rtx = XEXP (addr_rtx, 0);
25254 if (!REG_P (addr_rtx))
25255 return false;
25257 *base = addr_rtx;
25258 return true;
25261 /* Return true if the target storage location of
25262 MEM1 is adjacent to the target storage location of MEM2. */
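/* E.g., two 8-byte accesses at (r9 + 0) and (r9 + 8) are adjacent;
   the same accesses at (r9 + 0) and (r9 + 16) are not.  */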
25265 static bool
25266 adjacent_mem_locations (rtx mem1, rtx mem2)
25268 rtx reg1, reg2;
25269 HOST_WIDE_INT off1, size1, off2, size2;
25271 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25272 && get_memref_parts (mem2, &reg2, &off2, &size2))
25273 return ((REGNO (reg1) == REGNO (reg2))
25274 && ((off1 + size1 == off2)
25275 || (off2 + size2 == off1)));
25277 return false;
25280 /* This function returns true if it can be determined that the two MEM
25281 locations overlap by at least 1 byte based on base reg/offset/size. */
25283 static bool
25284 mem_locations_overlap (rtx mem1, rtx mem2)
25286 rtx reg1, reg2;
25287 HOST_WIDE_INT off1, size1, off2, size2;
25289 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25290 && get_memref_parts (mem2, &reg2, &off2, &size2))
25291 return ((REGNO (reg1) == REGNO (reg2))
25292 && (((off1 <= off2) && (off1 + size1 > off2))
25293 || ((off2 <= off1) && (off2 + size2 > off1))));
25295 return false;
25298 /* A C statement (sans semicolon) to update the integer scheduling
25299 priority INSN_PRIORITY (INSN). Increase the priority to execute
25300 INSN earlier; reduce the priority to execute INSN later. Do not
25301 define this macro if you do not need to adjust the scheduling
25302 priorities of insns. */
25304 static int
25305 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
25307 rtx load_mem, str_mem;
25308 /* On machines (like the 750) which have asymmetric integer units,
25309 where one integer unit can do multiply and divides and the other
25310 can't, reduce the priority of multiply/divide so it is scheduled
25311 before other integer operations. */
25313 #if 0
25314 if (! INSN_P (insn))
25315 return priority;
25317 if (GET_CODE (PATTERN (insn)) == USE)
25318 return priority;
25320 switch (rs6000_tune) {
25321 case PROCESSOR_PPC750:
25322 switch (get_attr_type (insn))
25324 default:
25325 break;
25327 case TYPE_MUL:
25328 case TYPE_DIV:
25329 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
25330 priority, priority);
25331 if (priority >= 0 && priority < 0x01000000)
25332 priority >>= 3;
25333 break;
25336 #endif
25338 if (insn_must_be_first_in_group (insn)
25339 && reload_completed
25340 && current_sched_info->sched_max_insns_priority
25341 && rs6000_sched_restricted_insns_priority)
25344 /* Prioritize insns that can be dispatched only in the first
25345 dispatch slot. */
25346 if (rs6000_sched_restricted_insns_priority == 1)
25347 /* Attach highest priority to insn. This means that in
25348 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
25349 precede 'priority' (critical path) considerations. */
25350 return current_sched_info->sched_max_insns_priority;
25351 else if (rs6000_sched_restricted_insns_priority == 2)
25352 /* Increase priority of insn by a minimal amount. This means that in
25353 haifa-sched.c:ready_sort(), only 'priority' (critical path)
25354 considerations precede dispatch-slot restriction considerations. */
25355 return (priority + 1);
25358 if (rs6000_tune == PROCESSOR_POWER6
25359 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
25360 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
25361 /* Attach highest priority to insn if the scheduler has just issued two
25362 stores and this instruction is a load, or two loads and this instruction
25363 is a store. Power6 wants loads and stores scheduled alternately
25364 when possible. */
25365 return current_sched_info->sched_max_insns_priority;
25367 return priority;
25370 /* Return true if the instruction is nonpipelined on the Cell. */
25371 static bool
25372 is_nonpipeline_insn (rtx_insn *insn)
25374 enum attr_type type;
25375 if (!insn || !NONDEBUG_INSN_P (insn)
25376 || GET_CODE (PATTERN (insn)) == USE
25377 || GET_CODE (PATTERN (insn)) == CLOBBER)
25378 return false;
25380 type = get_attr_type (insn);
25381 if (type == TYPE_MUL
25382 || type == TYPE_DIV
25383 || type == TYPE_SDIV
25384 || type == TYPE_DDIV
25385 || type == TYPE_SSQRT
25386 || type == TYPE_DSQRT
25387 || type == TYPE_MFCR
25388 || type == TYPE_MFCRF
25389 || type == TYPE_MFJMPR)
25391 return true;
25393 return false;
25397 /* Return how many instructions the machine can issue per cycle. */
25399 static int
25400 rs6000_issue_rate (void)
25402 /* Unless scheduling for register pressure, use issue rate of 1 for
25403 first scheduling pass to decrease degradation. */
25404 if (!reload_completed && !flag_sched_pressure)
25405 return 1;
25407 switch (rs6000_tune) {
25408 case PROCESSOR_RS64A:
25409 case PROCESSOR_PPC601: /* ? */
25410 case PROCESSOR_PPC7450:
25411 return 3;
25412 case PROCESSOR_PPC440:
25413 case PROCESSOR_PPC603:
25414 case PROCESSOR_PPC750:
25415 case PROCESSOR_PPC7400:
25416 case PROCESSOR_PPC8540:
25417 case PROCESSOR_PPC8548:
25418 case PROCESSOR_CELL:
25419 case PROCESSOR_PPCE300C2:
25420 case PROCESSOR_PPCE300C3:
25421 case PROCESSOR_PPCE500MC:
25422 case PROCESSOR_PPCE500MC64:
25423 case PROCESSOR_PPCE5500:
25424 case PROCESSOR_PPCE6500:
25425 case PROCESSOR_TITAN:
25426 return 2;
25427 case PROCESSOR_PPC476:
25428 case PROCESSOR_PPC604:
25429 case PROCESSOR_PPC604e:
25430 case PROCESSOR_PPC620:
25431 case PROCESSOR_PPC630:
25432 return 4;
25433 case PROCESSOR_POWER4:
25434 case PROCESSOR_POWER5:
25435 case PROCESSOR_POWER6:
25436 case PROCESSOR_POWER7:
25437 return 5;
25438 case PROCESSOR_POWER8:
25439 return 7;
25440 case PROCESSOR_POWER9:
25441 case PROCESSOR_FUTURE:
25442 return 6;
25443 default:
25444 return 1;
25448 /* Return how many instructions to look ahead for better insn
25449 scheduling. */
25451 static int
25452 rs6000_use_sched_lookahead (void)
25454 switch (rs6000_tune)
25456 case PROCESSOR_PPC8540:
25457 case PROCESSOR_PPC8548:
25458 return 4;
25460 case PROCESSOR_CELL:
25461 return (reload_completed ? 8 : 0);
25463 default:
25464 return 0;
25468 /* We are choosing insn from the ready queue. Return zero if INSN can be
25469 chosen. */
25470 static int
25471 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
25473 if (ready_index == 0)
25474 return 0;
25476 if (rs6000_tune != PROCESSOR_CELL)
25477 return 0;
25479 gcc_assert (insn != NULL_RTX && INSN_P (insn));
25481 if (!reload_completed
25482 || is_nonpipeline_insn (insn)
25483 || is_microcoded_insn (insn))
25484 return 1;
25486 return 0;
25489 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
25490 and return true. */
25492 static bool
25493 find_mem_ref (rtx pat, rtx *mem_ref)
25495 const char * fmt;
25496 int i, j;
25498 /* stack_tie does not produce any real memory traffic. */
25499 if (tie_operand (pat, VOIDmode))
25500 return false;
25502 if (MEM_P (pat))
25504 *mem_ref = pat;
25505 return true;
25508 /* Recursively process the pattern. */
25509 fmt = GET_RTX_FORMAT (GET_CODE (pat));
25511 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
25513 if (fmt[i] == 'e')
25515 if (find_mem_ref (XEXP (pat, i), mem_ref))
25516 return true;
25518 else if (fmt[i] == 'E')
25519 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
25521 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
25522 return true;
25526 return false;
25529 /* Determine if PAT is a PATTERN of a load insn. */
25531 static bool
25532 is_load_insn1 (rtx pat, rtx *load_mem)
25534 if (!pat || pat == NULL_RTX)
25535 return false;
25537 if (GET_CODE (pat) == SET)
25538 return find_mem_ref (SET_SRC (pat), load_mem);
25540 if (GET_CODE (pat) == PARALLEL)
25542 int i;
25544 for (i = 0; i < XVECLEN (pat, 0); i++)
25545 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
25546 return true;
25549 return false;
25552 /* Determine if INSN loads from memory. */
25554 static bool
25555 is_load_insn (rtx insn, rtx *load_mem)
25557 if (!insn || !INSN_P (insn))
25558 return false;
25560 if (CALL_P (insn))
25561 return false;
25563 return is_load_insn1 (PATTERN (insn), load_mem);
25566 /* Determine if PAT is a PATTERN of a store insn. */
25568 static bool
25569 is_store_insn1 (rtx pat, rtx *str_mem)
25571 if (pat == NULL_RTX)
25572 return false;
25574 if (GET_CODE (pat) == SET)
25575 return find_mem_ref (SET_DEST (pat), str_mem);
25577 if (GET_CODE (pat) == PARALLEL)
25579 int i;
25581 for (i = 0; i < XVECLEN (pat, 0); i++)
25582 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
25583 return true;
25586 return false;
25589 /* Determine if INSN stores to memory. */
25591 static bool
25592 is_store_insn (rtx insn, rtx *str_mem)
25594 if (!insn || !INSN_P (insn))
25595 return false;
25597 return is_store_insn1 (PATTERN (insn), str_mem);
25600 /* Return whether TYPE is a Power9 pairable vector instruction type. */
25602 static bool
25603 is_power9_pairable_vec_type (enum attr_type type)
25605 switch (type)
25607 case TYPE_VECSIMPLE:
25608 case TYPE_VECCOMPLEX:
25609 case TYPE_VECDIV:
25610 case TYPE_VECCMP:
25611 case TYPE_VECPERM:
25612 case TYPE_VECFLOAT:
25613 case TYPE_VECFDIV:
25614 case TYPE_VECDOUBLE:
25615 return true;
25616 default:
25617 break;
25619 return false;
25622 /* Returns whether the dependence between INSN and NEXT is considered
25623 costly by the given target. */
25625 static bool
25626 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
25628 rtx insn;
25629 rtx next;
25630 rtx load_mem, str_mem;
25632 /* If the flag is not enabled, no dependence is considered costly;
25633 allow all dependent insns in the same group.
25634 This is the most aggressive option. */
25635 if (rs6000_sched_costly_dep == no_dep_costly)
25636 return false;
25638 /* If the flag is set to 1, a dependence is always considered costly;
25639 do not allow dependent instructions in the same group.
25640 This is the most conservative option. */
25641 if (rs6000_sched_costly_dep == all_deps_costly)
25642 return true;
25644 insn = DEP_PRO (dep);
25645 next = DEP_CON (dep);
25647 if (rs6000_sched_costly_dep == store_to_load_dep_costly
25648 && is_load_insn (next, &load_mem)
25649 && is_store_insn (insn, &str_mem))
25650 /* Prevent load after store in the same group. */
25651 return true;
25653 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
25654 && is_load_insn (next, &load_mem)
25655 && is_store_insn (insn, &str_mem)
25656 && DEP_TYPE (dep) == REG_DEP_TRUE
25657 && mem_locations_overlap (str_mem, load_mem))
25658 /* Prevent load after store in the same group if it is a true
25659 dependence. */
25660 return true;
25662 /* The flag is set to X; dependences with latency >= X are considered costly,
25663 and will not be scheduled in the same group. */
25664 if (rs6000_sched_costly_dep <= max_dep_latency
25665 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
25666 return true;
25668 return false;
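/* Illustration (hypothetical insns): with rs6000_sched_costly_dep ==
   store_to_load_dep_costly, a store to 0(r9) followed by a dependent
   load is never placed in the same group; with a numeric setting of
   3, only dependences whose remaining latency (cost - distance) is
   at least 3 cycles are kept apart.  */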
25671 /* Return the next insn after INSN that is found before TAIL is reached,
25672 skipping any "non-active" insns - insns that will not actually occupy
25673 an issue slot. Return NULL_RTX if such an insn is not found. */
25675 static rtx_insn *
25676 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
25678 if (insn == NULL_RTX || insn == tail)
25679 return NULL;
25681 while (1)
25683 insn = NEXT_INSN (insn);
25684 if (insn == NULL_RTX || insn == tail)
25685 return NULL;
25687 if (CALL_P (insn)
25688 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
25689 || (NONJUMP_INSN_P (insn)
25690 && GET_CODE (PATTERN (insn)) != USE
25691 && GET_CODE (PATTERN (insn)) != CLOBBER
25692 && INSN_CODE (insn) != CODE_FOR_stack_tie))
25693 break;
25695 return insn;
25698 /* Do Power9 specific sched_reorder2 reordering of ready list. */
25700 static int
25701 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
25703 int pos;
25704 int i;
25705 rtx_insn *tmp;
25706 enum attr_type type, type2;
25708 type = get_attr_type (last_scheduled_insn);
25710 /* Try to issue fixed point divides back-to-back in pairs so they will be
25711 routed to separate execution units and execute in parallel. */
25712 if (type == TYPE_DIV && divide_cnt == 0)
25714 /* First divide has been scheduled. */
25715 divide_cnt = 1;
25717 /* Scan the ready list looking for another divide, if found move it
25718 to the end of the list so it is chosen next. */
25719 pos = lastpos;
25720 while (pos >= 0)
25722 if (recog_memoized (ready[pos]) >= 0
25723 && get_attr_type (ready[pos]) == TYPE_DIV)
25725 tmp = ready[pos];
25726 for (i = pos; i < lastpos; i++)
25727 ready[i] = ready[i + 1];
25728 ready[lastpos] = tmp;
25729 break;
25731 pos--;
25734 else
25736 /* Last insn was the 2nd divide or not a divide, reset the counter. */
25737 divide_cnt = 0;
25739 /* The best dispatch throughput for vector and vector load insns can be
25740 achieved by interleaving a vector and vector load such that they'll
25741 dispatch to the same superslice. If this pairing cannot be achieved
25742 then it is best to pair vector insns together and vector load insns
25743 together.
25745 To aid in this pairing, vec_pairing maintains the current state with
25746 the following values:
25748 0 : Initial state, no vecload/vector pairing has been started.
25750 1 : A vecload or vector insn has been issued and a candidate for
25751 pairing has been found and moved to the end of the ready
25752 list. */
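/* Worked example (hypothetical ready list): a vecload has just issued
   and vec_pairing == 0, so the scan below moves the first pairable
   vector insn it finds to ready[lastpos], making it the next choice
   and letting the pair dispatch to the same superslice; only if no
   vector insn exists is another vecload moved instead.  */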
25753 if (type == TYPE_VECLOAD)
25755 /* Issued a vecload. */
25756 if (vec_pairing == 0)
25758 int vecload_pos = -1;
25759 /* We issued a single vecload, look for a vector insn to pair it
25760 with. If one isn't found, try to pair another vecload. */
25761 pos = lastpos;
25762 while (pos >= 0)
25764 if (recog_memoized (ready[pos]) >= 0)
25766 type2 = get_attr_type (ready[pos]);
25767 if (is_power9_pairable_vec_type (type2))
25769 /* Found a vector insn to pair with, move it to the
25770 end of the ready list so it is scheduled next. */
25771 tmp = ready[pos];
25772 for (i = pos; i < lastpos; i++)
25773 ready[i] = ready[i + 1];
25774 ready[lastpos] = tmp;
25775 vec_pairing = 1;
25776 return cached_can_issue_more;
25778 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
25779 /* Remember position of first vecload seen. */
25780 vecload_pos = pos;
25782 pos--;
25784 if (vecload_pos >= 0)
25786 /* Didn't find a vector to pair with but did find a vecload,
25787 move it to the end of the ready list. */
25788 tmp = ready[vecload_pos];
25789 for (i = vecload_pos; i < lastpos; i++)
25790 ready[i] = ready[i + 1];
25791 ready[lastpos] = tmp;
25792 vec_pairing = 1;
25793 return cached_can_issue_more;
25797 else if (is_power9_pairable_vec_type (type))
25799 /* Issued a vector operation. */
25800 if (vec_pairing == 0)
25802 int vec_pos = -1;
25803 /* We issued a single vector insn, look for a vecload to pair it
25804 with. If one isn't found, try to pair another vector. */
25805 pos = lastpos;
25806 while (pos >= 0)
25808 if (recog_memoized (ready[pos]) >= 0)
25810 type2 = get_attr_type (ready[pos]);
25811 if (type2 == TYPE_VECLOAD)
25813 /* Found a vecload insn to pair with, move it to the
25814 end of the ready list so it is scheduled next. */
25815 tmp = ready[pos];
25816 for (i = pos; i < lastpos; i++)
25817 ready[i] = ready[i + 1];
25818 ready[lastpos] = tmp;
25819 vec_pairing = 1;
25820 return cached_can_issue_more;
25822 else if (is_power9_pairable_vec_type (type2)
25823 && vec_pos == -1)
25824 /* Remember position of first vector insn seen. */
25825 vec_pos = pos;
25827 pos--;
25829 if (vec_pos >= 0)
25831 /* Didn't find a vecload to pair with but did find a vector
25832 insn, move it to the end of the ready list. */
25833 tmp = ready[vec_pos];
25834 for (i = vec_pos; i < lastpos; i++)
25835 ready[i] = ready[i + 1];
25836 ready[lastpos] = tmp;
25837 vec_pairing = 1;
25838 return cached_can_issue_more;
25843 /* We've either finished a vec/vecload pair, couldn't find an insn to
25844 continue the current pair, or the last insn had nothing to do
25845 with pairing. In any case, reset the state. */
25846 vec_pairing = 0;
25849 return cached_can_issue_more;
25852 /* We are about to begin issuing insns for this clock cycle. */
25854 static int
25855 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
25856 rtx_insn **ready ATTRIBUTE_UNUSED,
25857 int *pn_ready ATTRIBUTE_UNUSED,
25858 int clock_var ATTRIBUTE_UNUSED)
25860 int n_ready = *pn_ready;
25862 if (sched_verbose)
25863 fprintf (dump, "// rs6000_sched_reorder :\n");
25865 /* Reorder the ready list if the insn at the top of the ready
25866 list is a nonpipelined insn. */
25867 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
25869 if (is_nonpipeline_insn (ready[n_ready - 1])
25870 && (recog_memoized (ready[n_ready - 2]) > 0))
25871 /* Simply swap first two insns. */
25872 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
25875 if (rs6000_tune == PROCESSOR_POWER6)
25876 load_store_pendulum = 0;
25878 return rs6000_issue_rate ();
25881 /* Like rs6000_sched_reorder, but called after issuing each insn. */
25883 static int
25884 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
25885 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
25887 if (sched_verbose)
25888 fprintf (dump, "// rs6000_sched_reorder2 :\n");
25890 /* For Power6, we need to handle some special cases to try and keep the
25891 store queue from overflowing and triggering expensive flushes.
25893 This code monitors how load and store instructions are being issued
25894 and skews the ready list one way or the other to increase the likelihood
25895 that a desired instruction is issued at the proper time.
25897 A couple of things are done. First, we maintain a "load_store_pendulum"
25898 to track the current state of load/store issue.
25900 - If the pendulum is at zero, then no loads or stores have been
25901 issued in the current cycle so we do nothing.
25903 - If the pendulum is 1, then a single load has been issued in this
25904 cycle and we attempt to locate another load in the ready list to
25905 issue with it.
25907 - If the pendulum is -2, then two stores have already been
25908 issued in this cycle, so we increase the priority of the first load
25909 in the ready list to increase its likelihood of being chosen first
25910 in the next cycle.
25912 - If the pendulum is -1, then a single store has been issued in this
25913 cycle and we attempt to locate another store in the ready list to
25914 issue with it, preferring a store to an adjacent memory location to
25915 facilitate store pairing in the store queue.
25917 - If the pendulum is 2, then two loads have already been
25918 issued in this cycle, so we increase the priority of the first store
25919 in the ready list to increase its likelihood of being chosen first
25920 in the next cycle.
25922 - If the pendulum < -2 or > 2, then do nothing.
25924 Note: This code covers the most common scenarios. There exist
25925 non-load/store instructions which make use of the LSU and which
25926 would need to be accounted for to strictly model the behavior
25927 of the machine. Those instructions are currently unaccounted
25928 for to help minimize compile time overhead of this code.
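/* Worked example (hypothetical cycle): the pendulum starts at 0; a
   store issues, moving it to -1, so the code below scans for a
   second (preferably adjacent) store; a second store moves it to -2,
   so the first load on the ready list gets a priority bump for the
   next cycle; beyond -2 or 2 nothing further is done.  */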
25930 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
25932 int pos;
25933 int i;
25934 rtx_insn *tmp;
25935 rtx load_mem, str_mem;
25937 if (is_store_insn (last_scheduled_insn, &str_mem))
25938 /* Issuing a store, swing the load_store_pendulum to the left */
25939 load_store_pendulum--;
25940 else if (is_load_insn (last_scheduled_insn, &load_mem))
25941 /* Issuing a load, swing the load_store_pendulum to the right */
25942 load_store_pendulum++;
25943 else
25944 return cached_can_issue_more;
25946 /* If the pendulum is balanced, or there is only one instruction on
25947 the ready list, then all is well, so return. */
25948 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
25949 return cached_can_issue_more;
25951 if (load_store_pendulum == 1)
25953 /* A load has been issued in this cycle. Scan the ready list
25954 for another load to issue with it */
25955 pos = *pn_ready-1;
25957 while (pos >= 0)
25959 if (is_load_insn (ready[pos], &load_mem))
25961 /* Found a load. Move it to the head of the ready list,
25962 and adjust its priority so that it is more likely to
25963 stay there */
25964 tmp = ready[pos];
25965 for (i=pos; i<*pn_ready-1; i++)
25966 ready[i] = ready[i + 1];
25967 ready[*pn_ready-1] = tmp;
25969 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25970 INSN_PRIORITY (tmp)++;
25971 break;
25973 pos--;
25976 else if (load_store_pendulum == -2)
25978 /* Two stores have been issued in this cycle. Increase the
25979 priority of the first load in the ready list to favor it for
25980 issuing in the next cycle. */
25981 pos = *pn_ready-1;
25983 while (pos >= 0)
25985 if (is_load_insn (ready[pos], &load_mem)
25986 && !sel_sched_p ()
25987 && INSN_PRIORITY_KNOWN (ready[pos]))
25989 INSN_PRIORITY (ready[pos])++;
25991 /* Adjust the pendulum to account for the fact that a load
25992 was found and increased in priority. This is to prevent
25993 increasing the priority of multiple loads */
25994 load_store_pendulum--;
25996 break;
25998 pos--;
26001 else if (load_store_pendulum == -1)
26003 /* A store has been issued in this cycle. Scan the ready list for
26004 another store to issue with it, preferring a store to an adjacent
26005 memory location */
26006 int first_store_pos = -1;
26008 pos = *pn_ready-1;
26010 while (pos >= 0)
26012 if (is_store_insn (ready[pos], &str_mem))
26014 rtx str_mem2;
26015 /* Maintain the index of the first store found on the
26016 list */
26017 if (first_store_pos == -1)
26018 first_store_pos = pos;
26020 if (is_store_insn (last_scheduled_insn, &str_mem2)
26021 && adjacent_mem_locations (str_mem, str_mem2))
26023 /* Found an adjacent store. Move it to the head of the
26024 ready list, and adjust its priority so that it is
26025 more likely to stay there */
26026 tmp = ready[pos];
26027 for (i=pos; i<*pn_ready-1; i++)
26028 ready[i] = ready[i + 1];
26029 ready[*pn_ready-1] = tmp;
26031 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
26032 INSN_PRIORITY (tmp)++;
26034 first_store_pos = -1;
26036 break;
26039 pos--;
26042 if (first_store_pos >= 0)
26044 /* An adjacent store wasn't found, but a non-adjacent store was,
26045 so move the non-adjacent store to the front of the ready
26046 list, and adjust its priority so that it is more likely to
26047 stay there. */
26048 tmp = ready[first_store_pos];
26049 for (i=first_store_pos; i<*pn_ready-1; i++)
26050 ready[i] = ready[i + 1];
26051 ready[*pn_ready-1] = tmp;
26052 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
26053 INSN_PRIORITY (tmp)++;
26056 else if (load_store_pendulum == 2)
26058 /* Two loads have been issued in this cycle. Increase the priority
26059 of the first store in the ready list to favor it for issuing in
26060 the next cycle. */
26061 pos = *pn_ready-1;
26063 while (pos >= 0)
26065 if (is_store_insn (ready[pos], &str_mem)
26066 && !sel_sched_p ()
26067 && INSN_PRIORITY_KNOWN (ready[pos]))
26069 INSN_PRIORITY (ready[pos])++;
26071 /* Adjust the pendulum to account for the fact that a store
26072 was found and increased in priority. This is to prevent
26073 increasing the priority of multiple stores */
26074 load_store_pendulum++;
26076 break;
26078 pos--;
26083 /* Do Power9 dependent reordering if necessary. */
26084 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
26085 && recog_memoized (last_scheduled_insn) >= 0)
26086 return power9_sched_reorder2 (ready, *pn_ready - 1);
26088 return cached_can_issue_more;
26091 /* Return whether the presence of INSN causes a dispatch group termination
26092 of group WHICH_GROUP.
26094 If WHICH_GROUP == current_group, this function will return true if INSN
26095 causes the termination of the current group (i.e., the dispatch group to
26096 which INSN belongs). This means that INSN will be the last insn in the
26097 group it belongs to.
26099 If WHICH_GROUP == previous_group, this function will return true if INSN
26100 causes the termination of the previous group (i.e., the dispatch group that
26101 precedes the group to which INSN belongs). This means that INSN will be
26102 the first insn in the group it belongs to. */
26104 static bool
26105 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
26107 bool first, last;
26109 if (! insn)
26110 return false;
26112 first = insn_must_be_first_in_group (insn);
26113 last = insn_must_be_last_in_group (insn);
26115 if (first && last)
26116 return true;
26118 if (which_group == current_group)
26119 return last;
26120 else if (which_group == previous_group)
26121 return first;
26123 return false;
26127 static bool
26128 insn_must_be_first_in_group (rtx_insn *insn)
26130 enum attr_type type;
26132 if (!insn
26133 || NOTE_P (insn)
26134 || DEBUG_INSN_P (insn)
26135 || GET_CODE (PATTERN (insn)) == USE
26136 || GET_CODE (PATTERN (insn)) == CLOBBER)
26137 return false;
26139 switch (rs6000_tune)
26141 case PROCESSOR_POWER5:
26142 if (is_cracked_insn (insn))
26143 return true;
26144 /* FALLTHRU */
26145 case PROCESSOR_POWER4:
26146 if (is_microcoded_insn (insn))
26147 return true;
26149 if (!rs6000_sched_groups)
26150 return false;
26152 type = get_attr_type (insn);
26154 switch (type)
26156 case TYPE_MFCR:
26157 case TYPE_MFCRF:
26158 case TYPE_MTCR:
26159 case TYPE_CR_LOGICAL:
26160 case TYPE_MTJMPR:
26161 case TYPE_MFJMPR:
26162 case TYPE_DIV:
26163 case TYPE_LOAD_L:
26164 case TYPE_STORE_C:
26165 case TYPE_ISYNC:
26166 case TYPE_SYNC:
26167 return true;
26168 default:
26169 break;
26171 break;
26172 case PROCESSOR_POWER6:
26173 type = get_attr_type (insn);
26175 switch (type)
26177 case TYPE_EXTS:
26178 case TYPE_CNTLZ:
26179 case TYPE_TRAP:
26180 case TYPE_MUL:
26181 case TYPE_INSERT:
26182 case TYPE_FPCOMPARE:
26183 case TYPE_MFCR:
26184 case TYPE_MTCR:
26185 case TYPE_MFJMPR:
26186 case TYPE_MTJMPR:
26187 case TYPE_ISYNC:
26188 case TYPE_SYNC:
26189 case TYPE_LOAD_L:
26190 case TYPE_STORE_C:
26191 return true;
26192 case TYPE_SHIFT:
26193 if (get_attr_dot (insn) == DOT_NO
26194 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26195 return true;
26196 else
26197 break;
26198 case TYPE_DIV:
26199 if (get_attr_size (insn) == SIZE_32)
26200 return true;
26201 else
26202 break;
26203 case TYPE_LOAD:
26204 case TYPE_STORE:
26205 case TYPE_FPLOAD:
26206 case TYPE_FPSTORE:
26207 if (get_attr_update (insn) == UPDATE_YES)
26208 return true;
26209 else
26210 break;
26211 default:
26212 break;
26214 break;
26215 case PROCESSOR_POWER7:
26216 type = get_attr_type (insn);
26218 switch (type)
26220 case TYPE_CR_LOGICAL:
26221 case TYPE_MFCR:
26222 case TYPE_MFCRF:
26223 case TYPE_MTCR:
26224 case TYPE_DIV:
26225 case TYPE_ISYNC:
26226 case TYPE_LOAD_L:
26227 case TYPE_STORE_C:
26228 case TYPE_MFJMPR:
26229 case TYPE_MTJMPR:
26230 return true;
26231 case TYPE_MUL:
26232 case TYPE_SHIFT:
26233 case TYPE_EXTS:
26234 if (get_attr_dot (insn) == DOT_YES)
26235 return true;
26236 else
26237 break;
26238 case TYPE_LOAD:
26239 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26240 || get_attr_update (insn) == UPDATE_YES)
26241 return true;
26242 else
26243 break;
26244 case TYPE_STORE:
26245 case TYPE_FPLOAD:
26246 case TYPE_FPSTORE:
26247 if (get_attr_update (insn) == UPDATE_YES)
26248 return true;
26249 else
26250 break;
26251 default:
26252 break;
26254 break;
26255 case PROCESSOR_POWER8:
26256 type = get_attr_type (insn);
26258 switch (type)
26260 case TYPE_CR_LOGICAL:
26261 case TYPE_MFCR:
26262 case TYPE_MFCRF:
26263 case TYPE_MTCR:
26264 case TYPE_SYNC:
26265 case TYPE_ISYNC:
26266 case TYPE_LOAD_L:
26267 case TYPE_STORE_C:
26268 case TYPE_VECSTORE:
26269 case TYPE_MFJMPR:
26270 case TYPE_MTJMPR:
26271 return true;
26272 case TYPE_SHIFT:
26273 case TYPE_EXTS:
26274 case TYPE_MUL:
26275 if (get_attr_dot (insn) == DOT_YES)
26276 return true;
26277 else
26278 break;
26279 case TYPE_LOAD:
26280 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26281 || get_attr_update (insn) == UPDATE_YES)
26282 return true;
26283 else
26284 break;
26285 case TYPE_STORE:
26286 if (get_attr_update (insn) == UPDATE_YES
26287 && get_attr_indexed (insn) == INDEXED_YES)
26288 return true;
26289 else
26290 break;
26291 default:
26292 break;
26294 break;
26295 default:
26296 break;
26299 return false;
26302 static bool
26303 insn_must_be_last_in_group (rtx_insn *insn)
26305 enum attr_type type;
26307 if (!insn
26308 || NOTE_P (insn)
26309 || DEBUG_INSN_P (insn)
26310 || GET_CODE (PATTERN (insn)) == USE
26311 || GET_CODE (PATTERN (insn)) == CLOBBER)
26312 return false;
26314 switch (rs6000_tune) {
26315 case PROCESSOR_POWER4:
26316 case PROCESSOR_POWER5:
26317 if (is_microcoded_insn (insn))
26318 return true;
26320 if (is_branch_slot_insn (insn))
26321 return true;
26323 break;
26324 case PROCESSOR_POWER6:
26325 type = get_attr_type (insn);
26327 switch (type)
26329 case TYPE_EXTS:
26330 case TYPE_CNTLZ:
26331 case TYPE_TRAP:
26332 case TYPE_MUL:
26333 case TYPE_FPCOMPARE:
26334 case TYPE_MFCR:
26335 case TYPE_MTCR:
26336 case TYPE_MFJMPR:
26337 case TYPE_MTJMPR:
26338 case TYPE_ISYNC:
26339 case TYPE_SYNC:
26340 case TYPE_LOAD_L:
26341 case TYPE_STORE_C:
26342 return true;
26343 case TYPE_SHIFT:
26344 if (get_attr_dot (insn) == DOT_NO
26345 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26346 return true;
26347 else
26348 break;
26349 case TYPE_DIV:
26350 if (get_attr_size (insn) == SIZE_32)
26351 return true;
26352 else
26353 break;
26354 default:
26355 break;
26357 break;
26358 case PROCESSOR_POWER7:
26359 type = get_attr_type (insn);
26361 switch (type)
26363 case TYPE_ISYNC:
26364 case TYPE_SYNC:
26365 case TYPE_LOAD_L:
26366 case TYPE_STORE_C:
26367 return true;
26368 case TYPE_LOAD:
26369 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26370 && get_attr_update (insn) == UPDATE_YES)
26371 return true;
26372 else
26373 break;
26374 case TYPE_STORE:
26375 if (get_attr_update (insn) == UPDATE_YES
26376 && get_attr_indexed (insn) == INDEXED_YES)
26377 return true;
26378 else
26379 break;
26380 default:
26381 break;
26383 break;
26384 case PROCESSOR_POWER8:
26385 type = get_attr_type (insn);
26387 switch (type)
26389 case TYPE_MFCR:
26390 case TYPE_MTCR:
26391 case TYPE_ISYNC:
26392 case TYPE_SYNC:
26393 case TYPE_LOAD_L:
26394 case TYPE_STORE_C:
26395 return true;
26396 case TYPE_LOAD:
26397 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26398 && get_attr_update (insn) == UPDATE_YES)
26399 return true;
26400 else
26401 break;
26402 case TYPE_STORE:
26403 if (get_attr_update (insn) == UPDATE_YES
26404 && get_attr_indexed (insn) == INDEXED_YES)
26405 return true;
26406 else
26407 break;
26408 default:
26409 break;
26411 break;
26412 default:
26413 break;
26416 return false;
26419 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
26420 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
26422 static bool
26423 is_costly_group (rtx *group_insns, rtx next_insn)
26425 int i;
26426 int issue_rate = rs6000_issue_rate ();
26428 for (i = 0; i < issue_rate; i++)
26430 sd_iterator_def sd_it;
26431 dep_t dep;
26432 rtx insn = group_insns[i];
26434 if (!insn)
26435 continue;
26437 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
26439 rtx next = DEP_CON (dep);
26441 if (next == next_insn
26442 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
26443 return true;
26447 return false;
26450 /* Utility of the function redefine_groups.
26451 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
26452 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
26453 to keep it "far" (in a separate group) from GROUP_INSNS, following
26454 one of the following schemes, depending on the value of the flag
26455 -minsert_sched_nops = X:
26456 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
26457 in order to force NEXT_INSN into a separate group.
26458 (2) X < sched_finish_regroup_exact: insert exactly X nops.
26459 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
26460 insertion (has a group just ended, how many vacant issue slots remain in the
26461 last group, and how many dispatch groups were encountered so far). */
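/* Illustration (hypothetical numbers): with an issue rate of 5 and 3
   vacant slots left, scheme (1) emits 2 nops when NEXT_INSN is not a
   branch (3 when it is, so the last nop opens the new group), while
   -minsert_sched_nops = 2 under scheme (2) always emits exactly 2
   nops.  */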
26463 static int
26464 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
26465 rtx_insn *next_insn, bool *group_end, int can_issue_more,
26466 int *group_count)
26468 rtx nop;
26469 bool force;
26470 int issue_rate = rs6000_issue_rate ();
26471 bool end = *group_end;
26472 int i;
26474 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
26475 return can_issue_more;
26477 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
26478 return can_issue_more;
26480 force = is_costly_group (group_insns, next_insn);
26481 if (!force)
26482 return can_issue_more;
26484 if (sched_verbose > 6)
26485 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
26486 *group_count ,can_issue_more);
26488 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
26490 if (*group_end)
26491 can_issue_more = 0;
26493 /* Since only a branch can be issued in the last issue_slot, it is
26494 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
26495 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
26496 in this case the last nop will start a new group and the branch
26497 will be forced to the new group. */
26498 if (can_issue_more && !is_branch_slot_insn (next_insn))
26499 can_issue_more--;
26501 /* Do we have a special group ending nop? */
26502 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
26503 || rs6000_tune == PROCESSOR_POWER8)
26505 nop = gen_group_ending_nop ();
26506 emit_insn_before (nop, next_insn);
26507 can_issue_more = 0;
26509 else
26510 while (can_issue_more > 0)
26512 nop = gen_nop ();
26513 emit_insn_before (nop, next_insn);
26514 can_issue_more--;
26517 *group_end = true;
26518 return 0;
26521 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
26523 int n_nops = rs6000_sched_insert_nops;
26525 /* Nops can't be issued from the branch slot, so the effective
26526 issue_rate for nops is 'issue_rate - 1'. */
26527 if (can_issue_more == 0)
26528 can_issue_more = issue_rate;
26529 can_issue_more--;
26530 if (can_issue_more == 0)
26532 can_issue_more = issue_rate - 1;
26533 (*group_count)++;
26534 end = true;
26535 for (i = 0; i < issue_rate; i++)
26537 group_insns[i] = 0;
26541 while (n_nops > 0)
26543 nop = gen_nop ();
26544 emit_insn_before (nop, next_insn);
26545 if (can_issue_more == issue_rate - 1) /* new group begins */
26546 end = false;
26547 can_issue_more--;
26548 if (can_issue_more == 0)
26550 can_issue_more = issue_rate - 1;
26551 (*group_count)++;
26552 end = true;
26553 for (i = 0; i < issue_rate; i++)
26555 group_insns[i] = 0;
26558 n_nops--;
26561 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
26562 can_issue_more++;
26564 /* Is next_insn going to start a new group? */
26565 *group_end
26566 = (end
26567 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26568 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26569 || (can_issue_more < issue_rate &&
26570 insn_terminates_group_p (next_insn, previous_group)));
26571 if (*group_end && end)
26572 (*group_count)--;
26574 if (sched_verbose > 6)
26575 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
26576 *group_count, can_issue_more);
26577 return can_issue_more;
26580 return can_issue_more;
26583 /* This function tries to synch the dispatch groups that the compiler "sees"
26584 with the dispatch groups that the processor dispatcher is expected to
26585 form in practice. It tries to achieve this synchronization by forcing the
26586 estimated processor grouping on the compiler (as opposed to the function
26587 'pad_groups' which tries to force the scheduler's grouping on the processor).
26589 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
26590 examines the (estimated) dispatch groups that will be formed by the processor
26591 dispatcher. It marks these group boundaries to reflect the estimated
26592 processor grouping, overriding the grouping that the scheduler had marked.
26593 Depending on the value of the flag '-minsert-sched-nops' this function can
26594 force certain insns into separate groups or force a certain distance between
26595 them by inserting nops, for example, if there exists a "costly dependence"
26596 between the insns.
26598 The function estimates the group boundaries that the processor will form as
26599 follows: It keeps track of how many vacant issue slots are available after
26600 each insn. A subsequent insn will start a new group if one of the following
26601 4 cases applies:
26602 - no more vacant issue slots remain in the current dispatch group.
26603 - only the last issue slot, which is the branch slot, is vacant, but the next
26604 insn is not a branch.
26605 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
26606 which means that a cracked insn (which occupies two issue slots) can't be
26607 issued in this group.
26608 - less than 'issue_rate' slots are vacant, and the next insn always needs to
26609 start a new group. */
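/* Illustration (hypothetical dispatch width of 5): once four insns
   have been slotted, only the branch slot remains, so a following
   add starts a new group; a cracked insn, which needs two slots,
   already starts a new group when only the last two slots (one of
   them the branch slot) are vacant.  */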
26611 static int
26612 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26613 rtx_insn *tail)
26615 rtx_insn *insn, *next_insn;
26616 int issue_rate;
26617 int can_issue_more;
26618 int slot, i;
26619 bool group_end;
26620 int group_count = 0;
26621 rtx *group_insns;
26623 /* Initialize. */
26624 issue_rate = rs6000_issue_rate ();
26625 group_insns = XALLOCAVEC (rtx, issue_rate);
26626 for (i = 0; i < issue_rate; i++)
26628 group_insns[i] = 0;
26630 can_issue_more = issue_rate;
26631 slot = 0;
26632 insn = get_next_active_insn (prev_head_insn, tail);
26633 group_end = false;
26635 while (insn != NULL_RTX)
26637 slot = (issue_rate - can_issue_more);
26638 group_insns[slot] = insn;
26639 can_issue_more =
26640 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26641 if (insn_terminates_group_p (insn, current_group))
26642 can_issue_more = 0;
26644 next_insn = get_next_active_insn (insn, tail);
26645 if (next_insn == NULL_RTX)
26646 return group_count + 1;
26648 /* Is next_insn going to start a new group? */
26649 group_end
26650 = (can_issue_more == 0
26651 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26652 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26653 || (can_issue_more < issue_rate &&
26654 insn_terminates_group_p (next_insn, previous_group)));
26656 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
26657 next_insn, &group_end, can_issue_more,
26658 &group_count);
26660 if (group_end)
26662 group_count++;
26663 can_issue_more = 0;
26664 for (i = 0; i < issue_rate; i++)
26666 group_insns[i] = 0;
26670 if (GET_MODE (next_insn) == TImode && can_issue_more)
26671 PUT_MODE (next_insn, VOIDmode);
26672 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
26673 PUT_MODE (next_insn, TImode);
26675 insn = next_insn;
26676 if (can_issue_more == 0)
26677 can_issue_more = issue_rate;
26678 } /* while */
26680 return group_count;
26683 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
26684 dispatch group boundaries that the scheduler had marked. Pad with nops
26685 any dispatch groups which have vacant issue slots, in order to force the
26686 scheduler's grouping on the processor dispatcher. The function
26687 returns the number of dispatch groups found. */
26689 static int
26690 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26691 rtx_insn *tail)
26693 rtx_insn *insn, *next_insn;
26694 rtx nop;
26695 int issue_rate;
26696 int can_issue_more;
26697 int group_end;
26698 int group_count = 0;
26700 /* Initialize issue_rate. */
26701 issue_rate = rs6000_issue_rate ();
26702 can_issue_more = issue_rate;
26704 insn = get_next_active_insn (prev_head_insn, tail);
26705 next_insn = get_next_active_insn (insn, tail);
26707 while (insn != NULL_RTX)
26709 can_issue_more =
26710 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26712 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
26714 if (next_insn == NULL_RTX)
26715 break;
26717 if (group_end)
26719 /* If the scheduler had marked group termination at this location
26720 (between insn and next_insn), and neither insn nor next_insn will
26721 force group termination, pad the group with nops to force group
26722 termination. */
26723 if (can_issue_more
26724 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
26725 && !insn_terminates_group_p (insn, current_group)
26726 && !insn_terminates_group_p (next_insn, previous_group))
26728 if (!is_branch_slot_insn (next_insn))
26729 can_issue_more--;
26731 while (can_issue_more)
26733 nop = gen_nop ();
26734 emit_insn_before (nop, next_insn);
26735 can_issue_more--;
26739 can_issue_more = issue_rate;
26740 group_count++;
26743 insn = next_insn;
26744 next_insn = get_next_active_insn (insn, tail);
26747 return group_count;
26750 /* We're beginning a new block. Initialize data structures as necessary. */
26752 static void
26753 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
26754 int sched_verbose ATTRIBUTE_UNUSED,
26755 int max_ready ATTRIBUTE_UNUSED)
26757 last_scheduled_insn = NULL;
26758 load_store_pendulum = 0;
26759 divide_cnt = 0;
26760 vec_pairing = 0;
26763 /* The following function is called at the end of scheduling BB.
26764 After reload, it inserts nops to enforce dispatch group bundling. */
26766 static void
26767 rs6000_sched_finish (FILE *dump, int sched_verbose)
26769 int n_groups;
26771 if (sched_verbose)
26772 fprintf (dump, "=== Finishing schedule.\n");
26774 if (reload_completed && rs6000_sched_groups)
26776 /* Do not run sched_finish hook when selective scheduling enabled. */
26777 if (sel_sched_p ())
26778 return;
26780 if (rs6000_sched_insert_nops == sched_finish_none)
26781 return;
26783 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
26784 n_groups = pad_groups (dump, sched_verbose,
26785 current_sched_info->prev_head,
26786 current_sched_info->next_tail);
26787 else
26788 n_groups = redefine_groups (dump, sched_verbose,
26789 current_sched_info->prev_head,
26790 current_sched_info->next_tail);
26792 if (sched_verbose >= 6)
26794 fprintf (dump, "ngroups = %d\n", n_groups);
26795 print_rtl (dump, current_sched_info->prev_head);
26796 fprintf (dump, "Done finish_sched\n");
26801 struct rs6000_sched_context
26803 short cached_can_issue_more;
26804 rtx_insn *last_scheduled_insn;
26805 int load_store_pendulum;
26806 int divide_cnt;
26807 int vec_pairing;
26810 typedef struct rs6000_sched_context rs6000_sched_context_def;
26811 typedef rs6000_sched_context_def *rs6000_sched_context_t;
26813 /* Allocate store for new scheduling context. */
26814 static void *
26815 rs6000_alloc_sched_context (void)
26817 return xmalloc (sizeof (rs6000_sched_context_def));
26820 /* If CLEAN_P is true, initialize _SC with clean data;
26821 otherwise, initialize it from the global context. */
26822 static void
26823 rs6000_init_sched_context (void *_sc, bool clean_p)
26825 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26827 if (clean_p)
26829 sc->cached_can_issue_more = 0;
26830 sc->last_scheduled_insn = NULL;
26831 sc->load_store_pendulum = 0;
26832 sc->divide_cnt = 0;
26833 sc->vec_pairing = 0;
26835 else
26837 sc->cached_can_issue_more = cached_can_issue_more;
26838 sc->last_scheduled_insn = last_scheduled_insn;
26839 sc->load_store_pendulum = load_store_pendulum;
26840 sc->divide_cnt = divide_cnt;
26841 sc->vec_pairing = vec_pairing;
26845 /* Sets the global scheduling context to the one pointed to by _SC. */
26846 static void
26847 rs6000_set_sched_context (void *_sc)
26849 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26851 gcc_assert (sc != NULL);
26853 cached_can_issue_more = sc->cached_can_issue_more;
26854 last_scheduled_insn = sc->last_scheduled_insn;
26855 load_store_pendulum = sc->load_store_pendulum;
26856 divide_cnt = sc->divide_cnt;
26857 vec_pairing = sc->vec_pairing;
26860 /* Free _SC. */
26861 static void
26862 rs6000_free_sched_context (void *_sc)
26864 gcc_assert (_sc != NULL);
26866 free (_sc);
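/* Lifecycle sketch (simplified; real callers reach these via the
   targetm.sched hook table rather than calling them directly):

     void *ctx = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (ctx, true);  // start from a clean state
     rs6000_set_sched_context (ctx);         // CTX becomes the global state
     ...
     rs6000_free_sched_context (ctx);
*/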
26869 static bool
26870 rs6000_sched_can_speculate_insn (rtx_insn *insn)
26872 switch (get_attr_type (insn))
26874 case TYPE_DIV:
26875 case TYPE_SDIV:
26876 case TYPE_DDIV:
26877 case TYPE_VECDIV:
26878 case TYPE_SSQRT:
26879 case TYPE_DSQRT:
26880 return false;
26882 default:
26883 return true;
26887 /* Length in units of the trampoline for entering a nested function. */
26890 rs6000_trampoline_size (void)
26892 int ret = 0;
26894 switch (DEFAULT_ABI)
26896 default:
26897 gcc_unreachable ();
26899 case ABI_AIX:
26900 ret = (TARGET_32BIT) ? 12 : 24;
26901 break;
26903 case ABI_ELFv2:
26904 gcc_assert (!TARGET_32BIT);
26905 ret = 32;
26906 break;
26908 case ABI_DARWIN:
26909 case ABI_V4:
26910 ret = (TARGET_32BIT) ? 40 : 48;
26911 break;
26914 return ret;
26917 /* Emit RTL insns to initialize the variable parts of a trampoline.
26918 FNADDR is an RTX for the address of the function's pure code.
26919 CXT is an RTX for the static chain value for the function. */
26921 static void
26922 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
26924 int regsize = (TARGET_32BIT) ? 4 : 8;
26925 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
26926 rtx ctx_reg = force_reg (Pmode, cxt);
26927 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
26929 switch (DEFAULT_ABI)
26931 default:
26932 gcc_unreachable ();
26934 /* Under AIX, just build the 3-word function descriptor. */
26935 case ABI_AIX:
26937 rtx fnmem, fn_reg, toc_reg;
26939 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
26940 error ("you cannot take the address of a nested function if you use "
26941 "the %qs option", "-mno-pointers-to-nested-functions");
26943 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
26944 fn_reg = gen_reg_rtx (Pmode);
26945 toc_reg = gen_reg_rtx (Pmode);
26947 /* Macro to shorten the code expansions below. */
26948 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
26950 m_tramp = replace_equiv_address (m_tramp, addr);
26952 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
26953 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
26954 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
26955 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
26956 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
26958 # undef MEM_PLUS
26960 break;
26962 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
26963 case ABI_ELFv2:
26964 case ABI_DARWIN:
26965 case ABI_V4:
26966 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
26967 LCT_NORMAL, VOIDmode,
26968 addr, Pmode,
26969 GEN_INT (rs6000_trampoline_size ()), SImode,
26970 fnaddr, Pmode,
26971 ctx_reg, Pmode);
26972 break;
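/* Resulting AIX trampoline layout (regsize is 4 or 8), mirroring the
   three emit_move_insn calls above:

     offset 0         : function entry address (word 0 of FNADDR's
                        function descriptor)
     offset regsize   : TOC pointer (word 1 of the descriptor)
     offset 2*regsize : static chain value CXT  */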
26977 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
26978 identifier as an argument, so the front end shouldn't look it up. */
26980 static bool
26981 rs6000_attribute_takes_identifier_p (const_tree attr_id)
26983 return is_attribute_p ("altivec", attr_id);
26986 /* Handle the "altivec" attribute. The attribute may have
26987 arguments as follows:
26989 __attribute__((altivec(vector__)))
26990 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
26991 __attribute__((altivec(bool__))) (always followed by 'unsigned')
26993 and may appear more than once (e.g., 'vector bool char') in a
26994 given declaration. */
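/* For instance (illustrative), the AltiVec declaration

     vector unsigned int v;

   reaches this handler as __attribute__((altivec(vector__))) applied
   to unsigned int and resolves to unsigned_V4SI_type_node in the
   'v' / E_SImode case below.  */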
26996 static tree
26997 rs6000_handle_altivec_attribute (tree *node,
26998 tree name ATTRIBUTE_UNUSED,
26999 tree args,
27000 int flags ATTRIBUTE_UNUSED,
27001 bool *no_add_attrs)
27003 tree type = *node, result = NULL_TREE;
27004 machine_mode mode;
27005 int unsigned_p;
27006 char altivec_type
27007 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
27008 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
27009 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
27010 : '?');
27012 while (POINTER_TYPE_P (type)
27013 || TREE_CODE (type) == FUNCTION_TYPE
27014 || TREE_CODE (type) == METHOD_TYPE
27015 || TREE_CODE (type) == ARRAY_TYPE)
27016 type = TREE_TYPE (type);
27018 mode = TYPE_MODE (type);
27020 /* Check for invalid AltiVec type qualifiers. */
27021 if (type == long_double_type_node)
27022 error ("use of %<long double%> in AltiVec types is invalid");
27023 else if (type == boolean_type_node)
27024 error ("use of boolean types in AltiVec types is invalid");
27025 else if (TREE_CODE (type) == COMPLEX_TYPE)
27026 error ("use of %<complex%> in AltiVec types is invalid");
27027 else if (DECIMAL_FLOAT_MODE_P (mode))
27028 error ("use of decimal floating point types in AltiVec types is invalid");
27029 else if (!TARGET_VSX)
27031 if (type == long_unsigned_type_node || type == long_integer_type_node)
27033 if (TARGET_64BIT)
27034 error ("use of %<long%> in AltiVec types is invalid for "
27035 "64-bit code without %qs", "-mvsx");
27036 else if (rs6000_warn_altivec_long)
27037 warning (0, "use of %<long%> in AltiVec types is deprecated; "
27038 "use %<int%>");
27040 else if (type == long_long_unsigned_type_node
27041 || type == long_long_integer_type_node)
27042 error ("use of %<long long%> in AltiVec types is invalid without %qs",
27043 "-mvsx");
27044 else if (type == double_type_node)
27045 error ("use of %<double%> in AltiVec types is invalid without %qs",
27046 "-mvsx");
27049 switch (altivec_type)
27051 case 'v':
27052 unsigned_p = TYPE_UNSIGNED (type);
27053 switch (mode)
27055 case E_TImode:
27056 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
27057 break;
27058 case E_DImode:
27059 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
27060 break;
27061 case E_SImode:
27062 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
27063 break;
27064 case E_HImode:
27065 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
27066 break;
27067 case E_QImode:
27068 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
27069 break;
27070 case E_SFmode: result = V4SF_type_node; break;
27071 case E_DFmode: result = V2DF_type_node; break;
27072 /* If the user says 'vector int bool', we may be handed the 'bool'
27073 attribute _before_ the 'vector' attribute, and so select the
27074 proper type in the 'b' case below. */
27075 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
27076 case E_V2DImode: case E_V2DFmode:
27077 result = type;
27078 default: break;
27080 break;
27081 case 'b':
27082 switch (mode)
27084 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
27085 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
27086 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
27087 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
27088 default: break;
27090 break;
27091 case 'p':
27092 switch (mode)
27094 case E_V8HImode: result = pixel_V8HI_type_node;
27095 default: break;
27097 default: break;
27100 /* Propagate qualifiers attached to the element type
27101 onto the vector type. */
27102 if (result && result != type && TYPE_QUALS (type))
27103 result = build_qualified_type (result, TYPE_QUALS (type));
27105 *no_add_attrs = true; /* No need to hang on to the attribute. */
27107 if (result)
27108 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
27110 return NULL_TREE;
27113 /* AltiVec defines five built-in scalar types that serve as vector
27114 elements; we must teach the compiler how to mangle them. The 128-bit
27115 floating point mangling is target-specific as well. */
27117 static const char *
27118 rs6000_mangle_type (const_tree type)
27120 type = TYPE_MAIN_VARIANT (type);
27122 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
27123 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
27124 return NULL;
27126 if (type == bool_char_type_node) return "U6__boolc";
27127 if (type == bool_short_type_node) return "U6__bools";
27128 if (type == pixel_type_node) return "u7__pixel";
27129 if (type == bool_int_type_node) return "U6__booli";
27130 if (type == bool_long_long_type_node) return "U6__boolx";
27132 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
27133 return "g";
27134 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
27135 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
27137 /* For all other types, use the default mangling. */
27138 return NULL;
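/* Rough example: a function  void f (__pixel)  mangles its parameter
   via this hook as u7__pixel, giving approximately _Z1fu7__pixel
   under the Itanium C++ ABI (vector types additionally get the
   generic vector wrapper around these element manglings).  */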
27141 /* Handle a "longcall" or "shortcall" attribute; arguments as in
27142 struct attribute_spec.handler. */
27144 static tree
27145 rs6000_handle_longcall_attribute (tree *node, tree name,
27146 tree args ATTRIBUTE_UNUSED,
27147 int flags ATTRIBUTE_UNUSED,
27148 bool *no_add_attrs)
27150 if (TREE_CODE (*node) != FUNCTION_TYPE
27151 && TREE_CODE (*node) != FIELD_DECL
27152 && TREE_CODE (*node) != TYPE_DECL)
27154 warning (OPT_Wattributes, "%qE attribute only applies to functions",
27155 name);
27156 *no_add_attrs = true;
27159 return NULL_TREE;
27162 /* Set longcall attributes on all functions declared when
27163 rs6000_default_long_calls is true. */
27164 static void
27165 rs6000_set_default_type_attributes (tree type)
27167 if (rs6000_default_long_calls
27168 && (TREE_CODE (type) == FUNCTION_TYPE
27169 || TREE_CODE (type) == METHOD_TYPE))
27170 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
27171 NULL_TREE,
27172 TYPE_ATTRIBUTES (type));
27174 #if TARGET_MACHO
27175 darwin_set_default_type_attributes (type);
27176 #endif
27179 /* Return a reference suitable for calling a function with the
27180 longcall attribute. */
27182 static rtx
27183 rs6000_longcall_ref (rtx call_ref, rtx arg)
27185 /* System V adds '.' to the internal name, so skip them. */
27186 const char *call_name = XSTR (call_ref, 0);
27187 if (*call_name == '.')
27189 while (*call_name == '.')
27190 call_name++;
27192 tree node = get_identifier (call_name);
27193 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
27196 if (TARGET_PLTSEQ)
27198 rtx base = const0_rtx;
27199 int regno = 12;
27200 if (rs6000_pcrel_p (cfun))
27202 rtx reg = gen_rtx_REG (Pmode, regno);
27203 rtx u = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27204 UNSPEC_PLT_PCREL);
27205 emit_insn (gen_rtx_SET (reg, u));
27206 return reg;
27209 if (DEFAULT_ABI == ABI_ELFv2)
27210 base = gen_rtx_REG (Pmode, TOC_REGISTER);
27211 else
27213 if (flag_pic)
27214 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
27215 regno = 11;
27217 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
27218 may be used by a function global entry point. For SysV4, r11
27219 is used by __glink_PLTresolve lazy resolver entry. */
27220 rtx reg = gen_rtx_REG (Pmode, regno);
27221 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27222 UNSPEC_PLT16_HA);
27223 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
27224 UNSPEC_PLT16_LO);
27225 emit_insn (gen_rtx_SET (reg, hi));
27226 emit_insn (gen_rtx_SET (reg, lo));
27227 return reg;
27230 return force_reg (Pmode, call_ref);
27233 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
27234 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
27235 #endif
27237 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
27238 struct attribute_spec.handler. */
27239 static tree
27240 rs6000_handle_struct_attribute (tree *node, tree name,
27241 tree args ATTRIBUTE_UNUSED,
27242 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27244 tree *type = NULL;
27245 if (DECL_P (*node))
27247 if (TREE_CODE (*node) == TYPE_DECL)
27248 type = &TREE_TYPE (*node);
27250 else
27251 type = node;
27253 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
27254 || TREE_CODE (*type) == UNION_TYPE)))
27256 warning (OPT_Wattributes, "%qE attribute ignored", name);
27257 *no_add_attrs = true;
27260 else if ((is_attribute_p ("ms_struct", name)
27261 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
27262 || ((is_attribute_p ("gcc_struct", name)
27263 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
27265 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
27266 name);
27267 *no_add_attrs = true;
27270 return NULL_TREE;
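/* Typical use (illustrative):

     struct __attribute__ ((ms_struct)) S { char c; int i : 7; };

   requests Microsoft bit-field layout for S; asking for both
   ms_struct and gcc_struct on one type is ignored with a warning by
   the checks above.  */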
27273 static bool
27274 rs6000_ms_bitfield_layout_p (const_tree record_type)
27276 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
27277 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
27278 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
27281 #ifdef USING_ELFOS_H
27283 /* A get_unnamed_section callback, used for switching to toc_section. */
27285 static void
27286 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
27288 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27289 && TARGET_MINIMAL_TOC)
27291 if (!toc_initialized)
27293 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27294 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27295 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
27296 fprintf (asm_out_file, "\t.tc ");
27297 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
27298 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27299 fprintf (asm_out_file, "\n");
27301 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27302 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27303 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27304 fprintf (asm_out_file, " = .+32768\n");
27305 toc_initialized = 1;
27307 else
27308 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27310 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27312 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27313 if (!toc_initialized)
27315 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27316 toc_initialized = 1;
27319 else
27321 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27322 if (!toc_initialized)
27324 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27325 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27326 fprintf (asm_out_file, " = .+32768\n");
27327 toc_initialized = 1;
27332 /* Implement TARGET_ASM_INIT_SECTIONS. */
27334 static void
27335 rs6000_elf_asm_init_sections (void)
27337 toc_section
27338 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
27340 sdata2_section
27341 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
27342 SDATA2_SECTION_ASM_OP);
27345 /* Implement TARGET_SELECT_RTX_SECTION. */
27347 static section *
27348 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
27349 unsigned HOST_WIDE_INT align)
27351 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
27352 return toc_section;
27353 else
27354 return default_elf_select_rtx_section (mode, x, align);
27357 /* For a SYMBOL_REF, set generic flags and then perform some
27358 target-specific processing.
27360 When the AIX ABI is requested on a non-AIX system, replace the
27361 function name with the real name (with a leading .) rather than the
27362 function descriptor name. This saves a lot of overriding code to
27363 read the prefixes. */
27365 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
27366 static void
27367 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
27369 default_encode_section_info (decl, rtl, first);
27371 if (first
27372 && TREE_CODE (decl) == FUNCTION_DECL
27373 && !TARGET_AIX
27374 && DEFAULT_ABI == ABI_AIX)
27376 rtx sym_ref = XEXP (rtl, 0);
27377 size_t len = strlen (XSTR (sym_ref, 0));
27378 char *str = XALLOCAVEC (char, len + 2);
27379 str[0] = '.';
27380 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
27381 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
27385 static inline bool
27386 compare_section_name (const char *section, const char *templ)
27388 int len;
27390 len = strlen (templ);
27391 return (strncmp (section, templ, len) == 0
27392 && (section[len] == 0 || section[len] == '.'));
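/* E.g. compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false: the character
   after the matched prefix is '2', not '.' or NUL.  */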
27395 bool
27396 rs6000_elf_in_small_data_p (const_tree decl)
27398 if (rs6000_sdata == SDATA_NONE)
27399 return false;
27401 /* We want to merge strings, so we never consider them small data. */
27402 if (TREE_CODE (decl) == STRING_CST)
27403 return false;
27405 /* Functions are never in the small data area. */
27406 if (TREE_CODE (decl) == FUNCTION_DECL)
27407 return false;
27409 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
27411 const char *section = DECL_SECTION_NAME (decl);
27412 if (compare_section_name (section, ".sdata")
27413 || compare_section_name (section, ".sdata2")
27414 || compare_section_name (section, ".gnu.linkonce.s")
27415 || compare_section_name (section, ".sbss")
27416 || compare_section_name (section, ".sbss2")
27417 || compare_section_name (section, ".gnu.linkonce.sb")
27418 || strcmp (section, ".PPC.EMB.sdata0") == 0
27419 || strcmp (section, ".PPC.EMB.sbss0") == 0)
27420 return true;
27422 else
27424 /* If we are told not to put readonly data in sdata, then don't. */
27425 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
27426 && !rs6000_readonly_in_sdata)
27427 return false;
27429 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
27431 if (size > 0
27432 && size <= g_switch_value
27433 /* If it's not public, and we're not going to reference it there,
27434 there's no need to put it in the small data section. */
27435 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
27436 return true;
27439 return false;
27442 #endif /* USING_ELFOS_H */
27444 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
27446 static bool
27447 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
27449 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
27452 /* Do not place thread-local symbols refs in the object blocks. */
27454 static bool
27455 rs6000_use_blocks_for_decl_p (const_tree decl)
27457 return !DECL_THREAD_LOCAL_P (decl);
27460 /* Return a REG that occurs in ADDR with coefficient 1.
27461 ADDR can be effectively incremented by incrementing REG.
27463 r0 is special and we must not select it as an address
27464 register by this routine since our caller will try to
27465 increment the returned register via an "la" instruction. */
27468 find_addr_reg (rtx addr)
27470 while (GET_CODE (addr) == PLUS)
27472 if (REG_P (XEXP (addr, 0))
27473 && REGNO (XEXP (addr, 0)) != 0)
27474 addr = XEXP (addr, 0);
27475 else if (REG_P (XEXP (addr, 1))
27476 && REGNO (XEXP (addr, 1)) != 0)
27477 addr = XEXP (addr, 1);
27478 else if (CONSTANT_P (XEXP (addr, 0)))
27479 addr = XEXP (addr, 1);
27480 else if (CONSTANT_P (XEXP (addr, 1)))
27481 addr = XEXP (addr, 0);
27482 else
27483 gcc_unreachable ();
27485 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
27486 return addr;
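/* E.g. for ADDR == (plus (reg r9) (const_int 16)) this returns r9;
   for (plus (plus (reg r9) (reg r10)) (const_int 4)) it steps into
   the inner PLUS and returns r9.  r0 is never chosen, per the
   comment above.  */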
27489 void
27490 rs6000_fatal_bad_address (rtx op)
27492 fatal_insn ("bad address", op);
27495 #if TARGET_MACHO
27497 vec<branch_island, va_gc> *branch_islands;
27499 /* Remember to generate a branch island for far calls to the given
27500 function. */
27502 static void
27503 add_compiler_branch_island (tree label_name, tree function_name,
27504 int line_number)
27506 branch_island bi = {function_name, label_name, line_number};
27507 vec_safe_push (branch_islands, bi);
27510 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
27511 already there or not. */
27513 static int
27514 no_previous_def (tree function_name)
27516 branch_island *bi;
27517 unsigned ix;
27519 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27520 if (function_name == bi->function_name)
27521 return 0;
27522 return 1;
27525 /* GET_PREV_LABEL gets the label name from the previous definition of
27526 the function. */
27528 static tree
27529 get_prev_label (tree function_name)
27531 branch_island *bi;
27532 unsigned ix;
27534 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27535 if (function_name == bi->function_name)
27536 return bi->label_name;
27537 return NULL_TREE;
27540 /* Generate PIC and indirect symbol stubs. */
27542 void
27543 machopic_output_stub (FILE *file, const char *symb, const char *stub)
27545 unsigned int length;
27546 char *symbol_name, *lazy_ptr_name;
27547 char *local_label_0;
27548 static unsigned label = 0;
27550 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
27551 symb = (*targetm.strip_name_encoding) (symb);
27554 length = strlen (symb);
27555 symbol_name = XALLOCAVEC (char, length + 32);
27556 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
27558 lazy_ptr_name = XALLOCAVEC (char, length + 32);
27559 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
27561 if (flag_pic == 2)
27562 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
27563 else
27564 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
27566 if (flag_pic == 2)
27568 fprintf (file, "\t.align 5\n");
27570 fprintf (file, "%s:\n", stub);
27571 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27573 label++;
27574 local_label_0 = XALLOCAVEC (char, 16);
27575 sprintf (local_label_0, "L%u$spb", label);
27577 fprintf (file, "\tmflr r0\n");
27578 if (TARGET_LINK_STACK)
27580 char name[32];
27581 get_ppc476_thunk_name (name);
27582 fprintf (file, "\tbl %s\n", name);
27583 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27585 else
27587 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
27588 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27590 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
27591 lazy_ptr_name, local_label_0);
27592 fprintf (file, "\tmtlr r0\n");
27593 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
27594 (TARGET_64BIT ? "ldu" : "lwzu"),
27595 lazy_ptr_name, local_label_0);
27596 fprintf (file, "\tmtctr r12\n");
27597 fprintf (file, "\tbctr\n");
27599 else
27601 fprintf (file, "\t.align 4\n");
27603 fprintf (file, "%s:\n", stub);
27604 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27606 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
27607 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
27608 (TARGET_64BIT ? "ldu" : "lwzu"),
27609 lazy_ptr_name);
27610 fprintf (file, "\tmtctr r12\n");
27611 fprintf (file, "\tbctr\n");
27614 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
27615 fprintf (file, "%s:\n", lazy_ptr_name);
27616 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27617 fprintf (file, "%sdyld_stub_binding_helper\n",
27618 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
27621 /* Legitimize PIC addresses. If the address is already
27622 position-independent, we return ORIG. Newly generated
27623 position-independent addresses go into a reg. This is REG if
27624 nonzero, otherwise we allocate register(s) as necessary. */
27626 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
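/* The bias maps the signed 16-bit range [-0x8000, 0x7fff] onto
   [0, 0xffff] so that a single unsigned compare suffices; SMALL_INT (X)
   is equivalent to the longhand

     INTVAL (X) >= -0x8000 && INTVAL (X) <= 0x7fff

   i.e. the constant fits a signed 16-bit displacement such as the
   lo16() offsets used in the stub code above.  */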
27628 rtx
27629 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
27630 rtx reg)
27632 rtx base, offset;
27634 if (reg == NULL && !reload_completed)
27635 reg = gen_reg_rtx (Pmode);
27637 if (GET_CODE (orig) == CONST)
27639 rtx reg_temp;
27641 if (GET_CODE (XEXP (orig, 0)) == PLUS
27642 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
27643 return orig;
27645 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
27647 /* Use a different reg for the intermediate value, as
27648 it will be marked UNCHANGING. */
27649 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
27650 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
27651 Pmode, reg_temp);
27652 offset =
27653 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
27654 Pmode, reg);
27656 if (CONST_INT_P (offset))
27658 if (SMALL_INT (offset))
27659 return plus_constant (Pmode, base, INTVAL (offset));
27660 else if (!reload_completed)
27661 offset = force_reg (Pmode, offset);
27662 else
27664 rtx mem = force_const_mem (Pmode, orig);
27665 return machopic_legitimize_pic_address (mem, Pmode, reg);
27668 return gen_rtx_PLUS (Pmode, base, offset);
27671 /* Fall back on generic machopic code. */
27672 return machopic_legitimize_pic_address (orig, mode, reg);
27675 /* Output a .machine directive for the Darwin assembler, and call
27676 the generic start_file routine. */
27678 static void
27679 rs6000_darwin_file_start (void)
27681 static const struct
27683 const char *arg;
27684 const char *name;
27685 HOST_WIDE_INT if_set;
27686 } mapping[] = {
27687 { "ppc64", "ppc64", MASK_64BIT },
27688 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
27689 { "power4", "ppc970", 0 },
27690 { "G5", "ppc970", 0 },
27691 { "7450", "ppc7450", 0 },
27692 { "7400", "ppc7400", MASK_ALTIVEC },
27693 { "G4", "ppc7400", 0 },
27694 { "750", "ppc750", 0 },
27695 { "740", "ppc750", 0 },
27696 { "G3", "ppc750", 0 },
27697 { "604e", "ppc604e", 0 },
27698 { "604", "ppc604", 0 },
27699 { "603e", "ppc603", 0 },
27700 { "603", "ppc603", 0 },
27701 { "601", "ppc601", 0 },
27702 { NULL, "ppc", 0 } };
27703 const char *cpu_id = "";
27704 size_t i;
27706 rs6000_file_start ();
27707 darwin_file_start ();
27709 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
27711 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
27712 cpu_id = rs6000_default_cpu;
27714 if (global_options_set.x_rs6000_cpu_index)
27715 cpu_id = processor_target_table[rs6000_cpu_index].name;
27717 /* Look through the mapping array. Pick the first name that either
27718 matches the argument, has a bit set in IF_SET that is also set
27719 in the target flags, or has a NULL name. */
27721 i = 0;
27722 while (mapping[i].arg != NULL
27723 && strcmp (mapping[i].arg, cpu_id) != 0
27724 && (mapping[i].if_set & rs6000_isa_flags) == 0)
27725 i++;
27727 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
27730 #endif /* TARGET_MACHO */
27732 #if TARGET_ELF
27733 static int
27734 rs6000_elf_reloc_rw_mask (void)
27736 if (flag_pic)
27737 return 3;
27738 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27739 return 2;
27740 else
27741 return 0;
27744 /* Record an element in the table of global constructors. SYMBOL is
27745 a SYMBOL_REF of the function to be called; PRIORITY is a number
27746 between 0 and MAX_INIT_PRIORITY.
27748 This differs from default_named_section_asm_out_constructor in
27749 that we have special handling for -mrelocatable. */
27751 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
27752 static void
27753 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
27755 const char *section = ".ctors";
27756 char buf[18];
27758 if (priority != DEFAULT_INIT_PRIORITY)
27760 sprintf (buf, ".ctors.%.5u",
27761 /* Invert the numbering so the linker puts us in the proper
27762 order; constructors are run from right to left, and the
27763 linker sorts in increasing order. */
27764 MAX_INIT_PRIORITY - priority);
27765 section = buf;
27768 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27769 assemble_align (POINTER_SIZE);
27771 if (DEFAULT_ABI == ABI_V4
27772 && (TARGET_RELOCATABLE || flag_pic > 1))
27774 fputs ("\t.long (", asm_out_file);
27775 output_addr_const (asm_out_file, symbol);
27776 fputs (")@fixup\n", asm_out_file);
27778 else
27779 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
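/* Worked example, with the usual MAX_INIT_PRIORITY of 65535: priority
   100 yields section ".ctors.65435" and priority 65000 yields
   ".ctors.00535".  The linker sorts the names in increasing order and
   .ctors entries execute last-to-first, so the priority-100
   constructor, which must run earlier, is indeed executed first.  */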
27782 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
27783 static void
27784 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
27786 const char *section = ".dtors";
27787 char buf[18];
27789 if (priority != DEFAULT_INIT_PRIORITY)
27791 sprintf (buf, ".dtors.%.5u",
27792 /* Invert the numbering so the linker puts us in the proper
27793 order; constructors are run from right to left, and the
27794 linker sorts in increasing order. */
27795 MAX_INIT_PRIORITY - priority);
27796 section = buf;
27799 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27800 assemble_align (POINTER_SIZE);
27802 if (DEFAULT_ABI == ABI_V4
27803 && (TARGET_RELOCATABLE || flag_pic > 1))
27805 fputs ("\t.long (", asm_out_file);
27806 output_addr_const (asm_out_file, symbol);
27807 fputs (")@fixup\n", asm_out_file);
27809 else
27810 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27813 void
27814 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
27816 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
27818 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
27819 ASM_OUTPUT_LABEL (file, name);
27820 fputs (DOUBLE_INT_ASM_OP, file);
27821 rs6000_output_function_entry (file, name);
27822 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
27823 if (DOT_SYMBOLS)
27825 fputs ("\t.size\t", file);
27826 assemble_name (file, name);
27827 fputs (",24\n\t.type\t.", file);
27828 assemble_name (file, name);
27829 fputs (",@function\n", file);
27830 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
27832 fputs ("\t.globl\t.", file);
27833 assemble_name (file, name);
27834 putc ('\n', file);
27837 else
27838 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27839 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27840 rs6000_output_function_entry (file, name);
27841 fputs (":\n", file);
27842 return;
27845 int uses_toc;
27846 if (DEFAULT_ABI == ABI_V4
27847 && (TARGET_RELOCATABLE || flag_pic > 1)
27848 && !TARGET_SECURE_PLT
27849 && (!constant_pool_empty_p () || crtl->profile)
27850 && (uses_toc = uses_TOC ()))
27852 char buf[256];
27854 if (uses_toc == 2)
27855 switch_to_other_text_partition ();
27856 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27858 fprintf (file, "\t.long ");
27859 assemble_name (file, toc_label_name);
27860 need_toc_init = 1;
27861 putc ('-', file);
27862 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27863 assemble_name (file, buf);
27864 putc ('\n', file);
27865 if (uses_toc == 2)
27866 switch_to_other_text_partition ();
27869 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27870 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27872 if (TARGET_CMODEL == CMODEL_LARGE
27873 && rs6000_global_entry_point_prologue_needed_p ())
27875 char buf[256];
27877 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27879 fprintf (file, "\t.quad .TOC.-");
27880 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27881 assemble_name (file, buf);
27882 putc ('\n', file);
27885 if (DEFAULT_ABI == ABI_AIX)
27887 const char *desc_name, *orig_name;
27889 orig_name = (*targetm.strip_name_encoding) (name);
27890 desc_name = orig_name;
27891 while (*desc_name == '.')
27892 desc_name++;
27894 if (TREE_PUBLIC (decl))
27895 fprintf (file, "\t.globl %s\n", desc_name);
27897 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27898 fprintf (file, "%s:\n", desc_name);
27899 fprintf (file, "\t.long %s\n", orig_name);
27900 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
27901 fputs ("\t.long 0\n", file);
27902 fprintf (file, "\t.previous\n");
27904 ASM_OUTPUT_LABEL (file, name);
27907 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
27908 static void
27909 rs6000_elf_file_end (void)
27911 #ifdef HAVE_AS_GNU_ATTRIBUTE
27912 /* ??? The value emitted depends on options active at file end.
27913 Assume anyone using #pragma or attributes that might change
27914 options knows what they are doing. */
27915 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
27916 && rs6000_passes_float)
27918 int fp;
27920 if (TARGET_HARD_FLOAT)
27921 fp = 1;
27922 else
27923 fp = 2;
27924 if (rs6000_passes_long_double)
27926 if (!TARGET_LONG_DOUBLE_128)
27927 fp |= 2 * 4;
27928 else if (TARGET_IEEEQUAD)
27929 fp |= 3 * 4;
27930 else
27931 fp |= 1 * 4;
27933 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
27935 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
27937 if (rs6000_passes_vector)
27938 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
27939 (TARGET_ALTIVEC_ABI ? 2 : 1));
27940 if (rs6000_returns_struct)
27941 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
27942 aix_struct_return ? 2 : 1);
27944 #endif
27945 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
27946 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
27947 file_end_indicate_exec_stack ();
27948 #endif
27950 if (flag_split_stack)
27951 file_end_indicate_split_stack ();
27953 if (cpu_builtin_p)
27955 /* We have expanded a CPU builtin, so we need to emit a reference to
27956 the special symbol that LIBC uses to declare that it supports the
27957 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 fields in the TCB. */
27958 switch_to_section (data_section);
27959 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
27960 fprintf (asm_out_file, "\t%s %s\n",
27961 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
27964 #endif
27966 #if TARGET_XCOFF
27968 #ifndef HAVE_XCOFF_DWARF_EXTRAS
27969 #define HAVE_XCOFF_DWARF_EXTRAS 0
27970 #endif
27972 static enum unwind_info_type
27973 rs6000_xcoff_debug_unwind_info (void)
27975 return UI_NONE;
27978 static void
27979 rs6000_xcoff_asm_output_anchor (rtx symbol)
27981 char buffer[100];
27983 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
27984 SYMBOL_REF_BLOCK_OFFSET (symbol));
27985 fprintf (asm_out_file, "%s", SET_ASM_OP);
27986 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
27987 fprintf (asm_out_file, ",");
27988 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
27989 fprintf (asm_out_file, "\n");
27992 static void
27993 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
27995 fputs (GLOBAL_ASM_OP, stream);
27996 RS6000_OUTPUT_BASENAME (stream, name);
27997 putc ('\n', stream);
28000 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
28001 points to the section name string variable. */
28003 static void
28004 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
28006 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
28007 *(const char *const *) directive,
28008 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28011 /* Likewise for read-write sections. */
28013 static void
28014 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
28016 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
28017 *(const char *const *) directive,
28018 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28021 static void
28022 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
28024 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
28025 *(const char *const *) directive,
28026 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28029 /* A get_unnamed_section callback, used for switching to toc_section. */
28031 static void
28032 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28034 if (TARGET_MINIMAL_TOC)
28036 /* toc_section is always selected at least once from
28037 rs6000_xcoff_file_start, so this is guaranteed to
28038 be defined exactly once in each file. */
28039 if (!toc_initialized)
28041 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
28042 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
28043 toc_initialized = 1;
28045 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
28046 (TARGET_32BIT ? "" : ",3"));
28048 else
28049 fputs ("\t.toc\n", asm_out_file);
28052 /* Implement TARGET_ASM_INIT_SECTIONS. */
28054 static void
28055 rs6000_xcoff_asm_init_sections (void)
28057 read_only_data_section
28058 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28059 &xcoff_read_only_section_name);
28061 private_data_section
28062 = get_unnamed_section (SECTION_WRITE,
28063 rs6000_xcoff_output_readwrite_section_asm_op,
28064 &xcoff_private_data_section_name);
28066 read_only_private_data_section
28067 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28068 &xcoff_private_rodata_section_name);
28070 tls_data_section
28071 = get_unnamed_section (SECTION_TLS,
28072 rs6000_xcoff_output_tls_section_asm_op,
28073 &xcoff_tls_data_section_name);
28075 tls_private_data_section
28076 = get_unnamed_section (SECTION_TLS,
28077 rs6000_xcoff_output_tls_section_asm_op,
28078 &xcoff_private_data_section_name);
28080 toc_section
28081 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
28083 readonly_data_section = read_only_data_section;
28086 static int
28087 rs6000_xcoff_reloc_rw_mask (void)
28089 return 3;
28092 static void
28093 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
28094 tree decl ATTRIBUTE_UNUSED)
28096 int smclass;
28097 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
28099 if (flags & SECTION_EXCLUDE)
28100 smclass = 4;
28101 else if (flags & SECTION_DEBUG)
28103 fprintf (asm_out_file, "\t.dwsect %s\n", name);
28104 return;
28106 else if (flags & SECTION_CODE)
28107 smclass = 0;
28108 else if (flags & SECTION_TLS)
28109 smclass = 3;
28110 else if (flags & SECTION_WRITE)
28111 smclass = 2;
28112 else
28113 smclass = 1;
28115 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
28116 (flags & SECTION_CODE) ? "." : "",
28117 name, suffix[smclass], flags & SECTION_ENTSIZE);
28120 #define IN_NAMED_SECTION(DECL) \
28121 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
28122 && DECL_SECTION_NAME (DECL) != NULL)
28124 static section *
28125 rs6000_xcoff_select_section (tree decl, int reloc,
28126 unsigned HOST_WIDE_INT align)
28128 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
28129 named section. */
28130 if (align > BIGGEST_ALIGNMENT)
28132 resolve_unique_section (decl, reloc, true);
28133 if (IN_NAMED_SECTION (decl))
28134 return get_named_section (decl, NULL, reloc);
28137 if (decl_readonly_section (decl, reloc))
28139 if (TREE_PUBLIC (decl))
28140 return read_only_data_section;
28141 else
28142 return read_only_private_data_section;
28144 else
28146 #if HAVE_AS_TLS
28147 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28149 if (TREE_PUBLIC (decl))
28150 return tls_data_section;
28151 else if (bss_initializer_p (decl))
28153 /* Convert to COMMON to emit in BSS. */
28154 DECL_COMMON (decl) = 1;
28155 return tls_comm_section;
28157 else
28158 return tls_private_data_section;
28160 else
28161 #endif
28162 if (TREE_PUBLIC (decl))
28163 return data_section;
28164 else
28165 return private_data_section;
28169 static void
28170 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
28172 const char *name;
28174 /* Use select_section for private data and uninitialized data with
28175 alignment <= BIGGEST_ALIGNMENT. */
28176 if (!TREE_PUBLIC (decl)
28177 || DECL_COMMON (decl)
28178 || (DECL_INITIAL (decl) == NULL_TREE
28179 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
28180 || DECL_INITIAL (decl) == error_mark_node
28181 || (flag_zero_initialized_in_bss
28182 && initializer_zerop (DECL_INITIAL (decl))))
28183 return;
28185 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
28186 name = (*targetm.strip_name_encoding) (name);
28187 set_decl_section_name (decl, name);
28190 /* Select section for constant in constant pool.
28192 On RS/6000, all constants are in the private read-only data area.
28193 However, if this is being placed in the TOC it must be output as a
28194 toc entry. */
28196 static section *
28197 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
28198 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
28200 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28201 return toc_section;
28202 else
28203 return read_only_private_data_section;
28206 /* Remove any trailing [DS] or the like from the symbol name. */
28208 static const char *
28209 rs6000_xcoff_strip_name_encoding (const char *name)
28211 size_t len;
28212 if (*name == '*')
28213 name++;
28214 len = strlen (name);
28215 if (name[len - 1] == ']')
28216 return ggc_alloc_string (name, len - 4);
28217 else
28218 return name;
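/* For example, "foo[DS]" and "*foo[DS]" both become "foo", while a
   plain "foo" is returned unchanged.  This assumes every bracketed
   suffix is a two-letter XCOFF mapping class ("[DS]", "[RW]", ...),
   so exactly four characters are dropped.  */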
28221 /* Section attributes. AIX is always PIC. */
28223 static unsigned int
28224 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
28226 unsigned int align;
28227 unsigned int flags = default_section_type_flags (decl, name, reloc);
28229 /* Align to at least UNIT size. */
28230 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
28231 align = MIN_UNITS_PER_WORD;
28232 else
28233 /* Increase alignment of large objects if not already stricter. */
28234 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
28235 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
28236 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
28238 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
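/* Worked example: a DFmode variable (DECL_ALIGN 64 bits, size 8) on a
   32-bit target has MIN_UNITS_PER_WORD 4 and UNITS_PER_FP_WORD 8, so
   align = MAX (8, 8) = 8 and exact_log2 (8) = 3 lands in the
   SECTION_ENTSIZE bits, which rs6000_xcoff_asm_named_section above
   prints back out as the log2 csect alignment.  */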
28241 /* Output at beginning of assembler file.
28243 Initialize the section names for the RS/6000 at this point.
28245 Specify filename, including full path, to assembler.
28247 We want to go into the TOC section so at least one .toc will be emitted.
28248 Also, in order to output proper .bs/.es pairs, we need at least one static
28249 [RW] section emitted.
28251 Finally, declare mcount when profiling to make the assembler happy. */
28253 static void
28254 rs6000_xcoff_file_start (void)
28256 rs6000_gen_section_name (&xcoff_bss_section_name,
28257 main_input_filename, ".bss_");
28258 rs6000_gen_section_name (&xcoff_private_data_section_name,
28259 main_input_filename, ".rw_");
28260 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
28261 main_input_filename, ".rop_");
28262 rs6000_gen_section_name (&xcoff_read_only_section_name,
28263 main_input_filename, ".ro_");
28264 rs6000_gen_section_name (&xcoff_tls_data_section_name,
28265 main_input_filename, ".tls_");
28266 rs6000_gen_section_name (&xcoff_tbss_section_name,
28267 main_input_filename, ".tbss_[UL]");
28269 fputs ("\t.file\t", asm_out_file);
28270 output_quoted_string (asm_out_file, main_input_filename);
28271 fputc ('\n', asm_out_file);
28272 if (write_symbols != NO_DEBUG)
28273 switch_to_section (private_data_section);
28274 switch_to_section (toc_section);
28275 switch_to_section (text_section);
28276 if (profile_flag)
28277 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
28278 rs6000_file_start ();
28281 /* Output at end of assembler file.
28282 On the RS/6000, referencing data should automatically pull in text. */
28284 static void
28285 rs6000_xcoff_file_end (void)
28287 switch_to_section (text_section);
28288 fputs ("_section_.text:\n", asm_out_file);
28289 switch_to_section (data_section);
28290 fputs (TARGET_32BIT
28291 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
28292 asm_out_file);
28295 struct declare_alias_data
28297 FILE *file;
28298 bool function_descriptor;
28301 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
28303 static bool
28304 rs6000_declare_alias (struct symtab_node *n, void *d)
28306 struct declare_alias_data *data = (struct declare_alias_data *)d;
28307 /* Main symbol is output specially, because varasm machinery does part of
28308 the job for us - we do not need to declare .globl/lglobs and such. */
28309 if (!n->alias || n->weakref)
28310 return false;
28312 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
28313 return false;
28315 /* Prevent assemble_alias from trying to use .set pseudo operation
28316 that does not behave as expected by the middle-end. */
28317 TREE_ASM_WRITTEN (n->decl) = true;
28319 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
28320 char *buffer = (char *) alloca (strlen (name) + 2);
28321 char *p;
28322 int dollar_inside = 0;
28324 strcpy (buffer, name);
28325 p = strchr (buffer, '$');
28326 while (p) {
28327 *p = '_';
28328 dollar_inside++;
28329 p = strchr (p + 1, '$');
28331 if (TREE_PUBLIC (n->decl))
28333 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
28335 if (dollar_inside) {
28336 if (data->function_descriptor)
28337 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28338 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28340 if (data->function_descriptor)
28342 fputs ("\t.globl .", data->file);
28343 RS6000_OUTPUT_BASENAME (data->file, buffer);
28344 putc ('\n', data->file);
28346 fputs ("\t.globl ", data->file);
28347 RS6000_OUTPUT_BASENAME (data->file, buffer);
28348 putc ('\n', data->file);
28350 #ifdef ASM_WEAKEN_DECL
28351 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
28352 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
28353 #endif
28355 else
28357 if (dollar_inside)
28359 if (data->function_descriptor)
28360 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28361 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28363 if (data->function_descriptor)
28365 fputs ("\t.lglobl .", data->file);
28366 RS6000_OUTPUT_BASENAME (data->file, buffer);
28367 putc ('\n', data->file);
28369 fputs ("\t.lglobl ", data->file);
28370 RS6000_OUTPUT_BASENAME (data->file, buffer);
28371 putc ('\n', data->file);
28373 if (data->function_descriptor)
28374 fputs (".", data->file);
28375 RS6000_OUTPUT_BASENAME (data->file, buffer);
28376 fputs (":\n", data->file);
28377 return false;
28381 #ifdef HAVE_GAS_HIDDEN
28382 /* Helper function to calculate visibility of a DECL
28383 and return the value as a const string. */
28385 static const char *
28386 rs6000_xcoff_visibility (tree decl)
28388 static const char * const visibility_types[] = {
28389 "", ",protected", ",hidden", ",internal"
28392 enum symbol_visibility vis = DECL_VISIBILITY (decl);
28393 return visibility_types[vis];
28395 #endif
28398 /* This macro produces the initial definition of a function name.
28399 On the RS/6000, we need to place an extra '.' in the function name and
28400 output the function descriptor.
28401 Dollar signs are converted to underscores.
28403 The csect for the function will have already been created when
28404 text_section was selected. We do have to go back to that csect, however.
28406 The third and fourth parameters to the .function pseudo-op (16 and 044)
28407 are placeholders which no longer have any use.
28409 Because AIX assembler's .set command has unexpected semantics, we output
28410 all aliases as alternative labels in front of the definition. */
28412 void
28413 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
28415 char *buffer = (char *) alloca (strlen (name) + 1);
28416 char *p;
28417 int dollar_inside = 0;
28418 struct declare_alias_data data = {file, false};
28420 strcpy (buffer, name);
28421 p = strchr (buffer, '$');
28422 while (p) {
28423 *p = '_';
28424 dollar_inside++;
28425 p = strchr (p + 1, '$');
28427 if (TREE_PUBLIC (decl))
28429 if (!RS6000_WEAK || !DECL_WEAK (decl))
28431 if (dollar_inside) {
28432 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28433 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28435 fputs ("\t.globl .", file);
28436 RS6000_OUTPUT_BASENAME (file, buffer);
28437 #ifdef HAVE_GAS_HIDDEN
28438 fputs (rs6000_xcoff_visibility (decl), file);
28439 #endif
28440 putc ('\n', file);
28443 else
28445 if (dollar_inside) {
28446 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28447 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28449 fputs ("\t.lglobl .", file);
28450 RS6000_OUTPUT_BASENAME (file, buffer);
28451 putc ('\n', file);
28453 fputs ("\t.csect ", file);
28454 RS6000_OUTPUT_BASENAME (file, buffer);
28455 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
28456 RS6000_OUTPUT_BASENAME (file, buffer);
28457 fputs (":\n", file);
28458 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28459 &data, true);
28460 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
28461 RS6000_OUTPUT_BASENAME (file, buffer);
28462 fputs (", TOC[tc0], 0\n", file);
28463 in_section = NULL;
28464 switch_to_section (function_section (decl));
28465 putc ('.', file);
28466 RS6000_OUTPUT_BASENAME (file, buffer);
28467 fputs (":\n", file);
28468 data.function_descriptor = true;
28469 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28470 &data, true);
28471 if (!DECL_IGNORED_P (decl))
28473 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28474 xcoffout_declare_function (file, decl, buffer);
28475 else if (write_symbols == DWARF2_DEBUG)
28477 name = (*targetm.strip_name_encoding) (name);
28478 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
28481 return;
28485 /* Output assembly language to globalize a symbol from a DECL,
28486 possibly with visibility. */
28488 void
28489 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
28491 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
28492 fputs (GLOBAL_ASM_OP, stream);
28493 RS6000_OUTPUT_BASENAME (stream, name);
28494 #ifdef HAVE_GAS_HIDDEN
28495 fputs (rs6000_xcoff_visibility (decl), stream);
28496 #endif
28497 putc ('\n', stream);
28500 /* Output assembly language to define a symbol as COMMON from a DECL,
28501 possibly with visibility. */
28503 void
28504 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
28505 tree decl ATTRIBUTE_UNUSED,
28506 const char *name,
28507 unsigned HOST_WIDE_INT size,
28508 unsigned HOST_WIDE_INT align)
28510 unsigned HOST_WIDE_INT align2 = 2;
28512 if (align > 32)
28513 align2 = floor_log2 (align / BITS_PER_UNIT);
28514 else if (size > 4)
28515 align2 = 3;
28517 fputs (COMMON_ASM_OP, stream);
28518 RS6000_OUTPUT_BASENAME (stream, name);
28520 fprintf (stream,
28521 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
28522 size, align2);
28524 #ifdef HAVE_GAS_HIDDEN
28525 if (decl != NULL)
28526 fputs (rs6000_xcoff_visibility (decl), stream);
28527 #endif
28528 putc ('\n', stream);
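/* Worked example: a variable with SIZE 16 and ALIGN 128 (bits) takes
   the align > 32 path, giving align2 = floor_log2 (16) = 4 and a
   directive of the form ".comm name,16,4"; a double (SIZE 8, ALIGN 64)
   likewise gets align2 = 3.  As with .csect, the trailing operand is a
   log2 alignment.  */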
28531 /* This macro produces the initial definition of an object (variable) name.
28532 Because AIX assembler's .set command has unexpected semantics, we output
28533 all aliases as alternative labels in front of the definition. */
28535 void
28536 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
28538 struct declare_alias_data data = {file, false};
28539 RS6000_OUTPUT_BASENAME (file, name);
28540 fputs (":\n", file);
28541 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28542 &data, true);
28545 /* Override the default 'SYMBOL-.' syntax with AIX-compatible 'SYMBOL-$'. */
28547 void
28548 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
28550 fputs (integer_asm_op (size, FALSE), file);
28551 assemble_name (file, label);
28552 fputs ("-$", file);
28555 /* Output a symbol offset relative to the dbase for the current object.
28556 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
28557 signed offsets.
28559 __gcc_unwind_dbase is embedded in all executables/libraries through
28560 libgcc/config/rs6000/crtdbase.S. */
28562 void
28563 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
28565 fputs (integer_asm_op (size, FALSE), file);
28566 assemble_name (file, label);
28567 fputs("-__gcc_unwind_dbase", file);
28570 #ifdef HAVE_AS_TLS
28571 static void
28572 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
28574 rtx symbol;
28575 int flags;
28576 const char *symname;
28578 default_encode_section_info (decl, rtl, first);
28580 /* Careful not to prod global register variables. */
28581 if (!MEM_P (rtl))
28582 return;
28583 symbol = XEXP (rtl, 0);
28584 if (!SYMBOL_REF_P (symbol))
28585 return;
28587 flags = SYMBOL_REF_FLAGS (symbol);
28589 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28590 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
28592 SYMBOL_REF_FLAGS (symbol) = flags;
28594 /* Append mapping class to extern decls. */
28595 symname = XSTR (symbol, 0);
28596 if (decl /* sync condition with assemble_external () */
28597 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
28598 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
28599 || TREE_CODE (decl) == FUNCTION_DECL)
28600 && symname[strlen (symname) - 1] != ']')
28602 char *newname = (char *) alloca (strlen (symname) + 5);
28603 strcpy (newname, symname);
28604 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
28605 ? "[DS]" : "[UA]"));
28606 XSTR (symbol, 0) = ggc_strdup (newname);
28609 #endif /* HAVE_AS_TLS */
28610 #endif /* TARGET_XCOFF */
28612 void
28613 rs6000_asm_weaken_decl (FILE *stream, tree decl,
28614 const char *name, const char *val)
28616 fputs ("\t.weak\t", stream);
28617 RS6000_OUTPUT_BASENAME (stream, name);
28618 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28619 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28621 if (TARGET_XCOFF)
28622 fputs ("[DS]", stream);
28623 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28624 if (TARGET_XCOFF)
28625 fputs (rs6000_xcoff_visibility (decl), stream);
28626 #endif
28627 fputs ("\n\t.weak\t.", stream);
28628 RS6000_OUTPUT_BASENAME (stream, name);
28630 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28631 if (TARGET_XCOFF)
28632 fputs (rs6000_xcoff_visibility (decl), stream);
28633 #endif
28634 fputc ('\n', stream);
28635 if (val)
28637 #ifdef ASM_OUTPUT_DEF
28638 ASM_OUTPUT_DEF (stream, name, val);
28639 #endif
28640 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28641 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28643 fputs ("\t.set\t.", stream);
28644 RS6000_OUTPUT_BASENAME (stream, name);
28645 fputs (",.", stream);
28646 RS6000_OUTPUT_BASENAME (stream, val);
28647 fputc ('\n', stream);
28653 /* Return true if INSN should not be copied. */
28655 static bool
28656 rs6000_cannot_copy_insn_p (rtx_insn *insn)
28658 return recog_memoized (insn) >= 0
28659 && get_attr_cannot_copy (insn);
28662 /* Compute a (partial) cost for rtx X. Return true if the complete
28663 cost has been computed, and false if subexpressions should be
28664 scanned. In either case, *TOTAL contains the cost result. */
28666 static bool
28667 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
28668 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
28670 int code = GET_CODE (x);
28672 switch (code)
28674 /* On the RS/6000, if it is valid in the insn, it is free. */
28675 case CONST_INT:
28676 if (((outer_code == SET
28677 || outer_code == PLUS
28678 || outer_code == MINUS)
28679 && (satisfies_constraint_I (x)
28680 || satisfies_constraint_L (x)))
28681 || (outer_code == AND
28682 && (satisfies_constraint_K (x)
28683 || (mode == SImode
28684 ? satisfies_constraint_L (x)
28685 : satisfies_constraint_J (x))))
28686 || ((outer_code == IOR || outer_code == XOR)
28687 && (satisfies_constraint_K (x)
28688 || (mode == SImode
28689 ? satisfies_constraint_L (x)
28690 : satisfies_constraint_J (x))))
28691 || outer_code == ASHIFT
28692 || outer_code == ASHIFTRT
28693 || outer_code == LSHIFTRT
28694 || outer_code == ROTATE
28695 || outer_code == ROTATERT
28696 || outer_code == ZERO_EXTRACT
28697 || (outer_code == MULT
28698 && satisfies_constraint_I (x))
28699 || ((outer_code == DIV || outer_code == UDIV
28700 || outer_code == MOD || outer_code == UMOD)
28701 && exact_log2 (INTVAL (x)) >= 0)
28702 || (outer_code == COMPARE
28703 && (satisfies_constraint_I (x)
28704 || satisfies_constraint_K (x)))
28705 || ((outer_code == EQ || outer_code == NE)
28706 && (satisfies_constraint_I (x)
28707 || satisfies_constraint_K (x)
28708 || (mode == SImode
28709 ? satisfies_constraint_L (x)
28710 : satisfies_constraint_J (x))))
28711 || (outer_code == GTU
28712 && satisfies_constraint_I (x))
28713 || (outer_code == LTU
28714 && satisfies_constraint_P (x)))
28716 *total = 0;
28717 return true;
28719 else if ((outer_code == PLUS
28720 && reg_or_add_cint_operand (x, VOIDmode))
28721 || (outer_code == MINUS
28722 && reg_or_sub_cint_operand (x, VOIDmode))
28723 || ((outer_code == SET
28724 || outer_code == IOR
28725 || outer_code == XOR)
28726 && (INTVAL (x)
28727 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
28729 *total = COSTS_N_INSNS (1);
28730 return true;
28732 /* FALLTHRU */
28734 case CONST_DOUBLE:
28735 case CONST_WIDE_INT:
28736 case CONST:
28737 case HIGH:
28738 case SYMBOL_REF:
28739 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28740 return true;
28742 case MEM:
28743 /* When optimizing for size, MEM should be slightly more expensive
28744 than generating the address, e.g., (plus (reg) (const)).
28745 L1 cache latency is about two instructions. */
28746 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28747 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
28748 *total += COSTS_N_INSNS (100);
28749 return true;
28751 case LABEL_REF:
28752 *total = 0;
28753 return true;
28755 case PLUS:
28756 case MINUS:
28757 if (FLOAT_MODE_P (mode))
28758 *total = rs6000_cost->fp;
28759 else
28760 *total = COSTS_N_INSNS (1);
28761 return false;
28763 case MULT:
28764 if (CONST_INT_P (XEXP (x, 1))
28765 && satisfies_constraint_I (XEXP (x, 1)))
28767 if (INTVAL (XEXP (x, 1)) >= -256
28768 && INTVAL (XEXP (x, 1)) <= 255)
28769 *total = rs6000_cost->mulsi_const9;
28770 else
28771 *total = rs6000_cost->mulsi_const;
28773 else if (mode == SFmode)
28774 *total = rs6000_cost->fp;
28775 else if (FLOAT_MODE_P (mode))
28776 *total = rs6000_cost->dmul;
28777 else if (mode == DImode)
28778 *total = rs6000_cost->muldi;
28779 else
28780 *total = rs6000_cost->mulsi;
28781 return false;
28783 case FMA:
28784 if (mode == SFmode)
28785 *total = rs6000_cost->fp;
28786 else
28787 *total = rs6000_cost->dmul;
28788 break;
28790 case DIV:
28791 case MOD:
28792 if (FLOAT_MODE_P (mode))
28794 *total = mode == DFmode ? rs6000_cost->ddiv
28795 : rs6000_cost->sdiv;
28796 return false;
28798 /* FALLTHRU */
28800 case UDIV:
28801 case UMOD:
28802 if (CONST_INT_P (XEXP (x, 1))
28803 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
28805 if (code == DIV || code == MOD)
28806 /* Shift, addze */
28807 *total = COSTS_N_INSNS (2);
28808 else
28809 /* Shift */
28810 *total = COSTS_N_INSNS (1);
28812 else
28814 if (GET_MODE (XEXP (x, 1)) == DImode)
28815 *total = rs6000_cost->divdi;
28816 else
28817 *total = rs6000_cost->divsi;
28819 /* Add in shift and subtract for MOD unless we have a mod instruction. */
28820 if (!TARGET_MODULO && (code == MOD || code == UMOD))
28821 *total += COSTS_N_INSNS (2);
28822 return false;
28824 case CTZ:
28825 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
28826 return false;
28828 case FFS:
28829 *total = COSTS_N_INSNS (4);
28830 return false;
28832 case POPCOUNT:
28833 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
28834 return false;
28836 case PARITY:
28837 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
28838 return false;
28840 case NOT:
28841 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
28842 *total = 0;
28843 else
28844 *total = COSTS_N_INSNS (1);
28845 return false;
28847 case AND:
28848 if (CONST_INT_P (XEXP (x, 1)))
28850 rtx left = XEXP (x, 0);
28851 rtx_code left_code = GET_CODE (left);
28853 /* rotate-and-mask: 1 insn. */
28854 if ((left_code == ROTATE
28855 || left_code == ASHIFT
28856 || left_code == LSHIFTRT)
28857 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
28859 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
28860 if (!CONST_INT_P (XEXP (left, 1)))
28861 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
28862 *total += COSTS_N_INSNS (1);
28863 return true;
28866 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
28867 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
28868 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
28869 || (val & 0xffff) == val
28870 || (val & 0xffff0000) == val
28871 || ((val & 0xffff) == 0 && mode == SImode))
28873 *total = rtx_cost (left, mode, AND, 0, speed);
28874 *total += COSTS_N_INSNS (1);
28875 return true;
28878 /* 2 insns. */
28879 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
28881 *total = rtx_cost (left, mode, AND, 0, speed);
28882 *total += COSTS_N_INSNS (2);
28883 return true;
28887 *total = COSTS_N_INSNS (1);
28888 return false;
28890 case IOR:
28891 /* FIXME */
28892 *total = COSTS_N_INSNS (1);
28893 return true;
28895 case CLZ:
28896 case XOR:
28897 case ZERO_EXTRACT:
28898 *total = COSTS_N_INSNS (1);
28899 return false;
28901 case ASHIFT:
28902 /* The EXTSWSLI instruction is a combined instruction, so don't count
28903 the sign extend and the shift separately within the insn. */
28904 if (TARGET_EXTSWSLI && mode == DImode
28905 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
28906 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
28908 *total = 0;
28909 return false;
28911 /* fall through */
28913 case ASHIFTRT:
28914 case LSHIFTRT:
28915 case ROTATE:
28916 case ROTATERT:
28917 /* Handle mul_highpart. */
28918 if (outer_code == TRUNCATE
28919 && GET_CODE (XEXP (x, 0)) == MULT)
28921 if (mode == DImode)
28922 *total = rs6000_cost->muldi;
28923 else
28924 *total = rs6000_cost->mulsi;
28925 return true;
28927 else if (outer_code == AND)
28928 *total = 0;
28929 else
28930 *total = COSTS_N_INSNS (1);
28931 return false;
28933 case SIGN_EXTEND:
28934 case ZERO_EXTEND:
28935 if (MEM_P (XEXP (x, 0)))
28936 *total = 0;
28937 else
28938 *total = COSTS_N_INSNS (1);
28939 return false;
28941 case COMPARE:
28942 case NEG:
28943 case ABS:
28944 if (!FLOAT_MODE_P (mode))
28946 *total = COSTS_N_INSNS (1);
28947 return false;
28949 /* FALLTHRU */
28951 case FLOAT:
28952 case UNSIGNED_FLOAT:
28953 case FIX:
28954 case UNSIGNED_FIX:
28955 case FLOAT_TRUNCATE:
28956 *total = rs6000_cost->fp;
28957 return false;
28959 case FLOAT_EXTEND:
28960 if (mode == DFmode)
28961 *total = rs6000_cost->sfdf_convert;
28962 else
28963 *total = rs6000_cost->fp;
28964 return false;
28966 case UNSPEC:
28967 switch (XINT (x, 1))
28969 case UNSPEC_FRSP:
28970 *total = rs6000_cost->fp;
28971 return true;
28973 default:
28974 break;
28976 break;
28978 case CALL:
28979 case IF_THEN_ELSE:
28980 if (!speed)
28982 *total = COSTS_N_INSNS (1);
28983 return true;
28985 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
28987 *total = rs6000_cost->fp;
28988 return false;
28990 break;
28992 case NE:
28993 case EQ:
28994 case GTU:
28995 case LTU:
28996 /* Carry bit requires mode == Pmode.
28997 NEG or PLUS already counted so only add one. */
28998 if (mode == Pmode
28999 && (outer_code == NEG || outer_code == PLUS))
29001 *total = COSTS_N_INSNS (1);
29002 return true;
29004 /* FALLTHRU */
29006 case GT:
29007 case LT:
29008 case UNORDERED:
29009 if (outer_code == SET)
29011 if (XEXP (x, 1) == const0_rtx)
29013 *total = COSTS_N_INSNS (2);
29014 return true;
29016 else
29018 *total = COSTS_N_INSNS (3);
29019 return false;
29022 /* CC COMPARE. */
29023 if (outer_code == COMPARE)
29025 *total = 0;
29026 return true;
29028 break;
29030 default:
29031 break;
29034 return false;
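/* For example, (const_int 8) inside (plus (reg) (const_int 8))
   satisfies constraint "I" (signed 16 bits) and so is reported free
   above: the addend folds into a single addi.  A 32-bit constant such
   as 0x12345678 in a SET misses "I" and "L" but matches the
   SET/IOR/XOR clause, costing COSTS_N_INSNS (1).  */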
29037 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
29039 static bool
29040 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
29041 int opno, int *total, bool speed)
29043 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
29045 fprintf (stderr,
29046 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
29047 "opno = %d, total = %d, speed = %s, x:\n",
29048 ret ? "complete" : "scan inner",
29049 GET_MODE_NAME (mode),
29050 GET_RTX_NAME (outer_code),
29051 opno,
29052 *total,
29053 speed ? "true" : "false");
29055 debug_rtx (x);
29057 return ret;
29060 static int
29061 rs6000_insn_cost (rtx_insn *insn, bool speed)
29063 if (recog_memoized (insn) < 0)
29064 return 0;
29066 if (!speed)
29067 return get_attr_length (insn);
29069 int cost = get_attr_cost (insn);
29070 if (cost > 0)
29071 return cost;
29073 int n = get_attr_length (insn) / 4;
29074 enum attr_type type = get_attr_type (insn);
29076 switch (type)
29078 case TYPE_LOAD:
29079 case TYPE_FPLOAD:
29080 case TYPE_VECLOAD:
29081 cost = COSTS_N_INSNS (n + 1);
29082 break;
29084 case TYPE_MUL:
29085 switch (get_attr_size (insn))
29087 case SIZE_8:
29088 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
29089 break;
29090 case SIZE_16:
29091 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
29092 break;
29093 case SIZE_32:
29094 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
29095 break;
29096 case SIZE_64:
29097 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
29098 break;
29099 default:
29100 gcc_unreachable ();
29102 break;
29103 case TYPE_DIV:
29104 switch (get_attr_size (insn))
29106 case SIZE_32:
29107 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
29108 break;
29109 case SIZE_64:
29110 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
29111 break;
29112 default:
29113 gcc_unreachable ();
29115 break;
29117 case TYPE_FP:
29118 cost = n * rs6000_cost->fp;
29119 break;
29120 case TYPE_DMUL:
29121 cost = n * rs6000_cost->dmul;
29122 break;
29123 case TYPE_SDIV:
29124 cost = n * rs6000_cost->sdiv;
29125 break;
29126 case TYPE_DDIV:
29127 cost = n * rs6000_cost->ddiv;
29128 break;
29130 case TYPE_SYNC:
29131 case TYPE_LOAD_L:
29132 case TYPE_MFCR:
29133 case TYPE_MFCRF:
29134 cost = COSTS_N_INSNS (n + 2);
29135 break;
29137 default:
29138 cost = COSTS_N_INSNS (n);
29141 return cost;
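/* For example, in speed mode a TYPE_LOAD insn 8 bytes long (n = 2,
   say an addis/load pair emitted as a single insn) is costed
   COSTS_N_INSNS (3): two issued instructions plus one extra for L1
   load latency.  Insns carrying an explicit "cost" attribute bypass
   this table entirely.  */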
29144 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
29146 static int
29147 rs6000_debug_address_cost (rtx x, machine_mode mode,
29148 addr_space_t as, bool speed)
29150 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
29152 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
29153 ret, speed ? "true" : "false");
29154 debug_rtx (x);
29156 return ret;
29160 /* A C expression returning the cost of moving data from a register of class
29161 CLASS1 to one of CLASS2. */
29163 static int
29164 rs6000_register_move_cost (machine_mode mode,
29165 reg_class_t from, reg_class_t to)
29167 int ret;
29168 reg_class_t rclass;
29170 if (TARGET_DEBUG_COST)
29171 dbg_cost_ctrl++;
29173 /* If we have VSX, we can easily move between FPR or Altivec registers,
29174 otherwise we can only easily move within classes.
29175 Do this first so we give best-case answers for union classes
29176 containing both gprs and vsx regs. */
29177 HARD_REG_SET to_vsx, from_vsx;
29178 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
29179 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
29180 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
29181 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
29182 if (!hard_reg_set_empty_p (to_vsx)
29183 && !hard_reg_set_empty_p (from_vsx)
29184 && (TARGET_VSX
29185 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
29187 int reg = FIRST_FPR_REGNO;
29188 if (TARGET_VSX
29189 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
29190 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
29191 reg = FIRST_ALTIVEC_REGNO;
29192 ret = 2 * hard_regno_nregs (reg, mode);
29195 /* Moves from/to GENERAL_REGS. */
29196 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
29197 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
29199 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
29201 if (TARGET_DIRECT_MOVE)
29203 /* Keep the cost for direct moves above that for within
29204 a register class even if the actual processor cost is
29205 comparable. We do this because a direct move insn
29206 can't be a nop, whereas with ideal register
29207 allocation a move within the same class might turn
29208 out to be a nop. */
29209 if (rs6000_tune == PROCESSOR_POWER9
29210 || rs6000_tune == PROCESSOR_FUTURE)
29211 ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29212 else
29213 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29214 /* SFmode requires a conversion when moving between gprs
29215 and vsx. */
29216 if (mode == SFmode)
29217 ret += 2;
29219 else
29220 ret = (rs6000_memory_move_cost (mode, rclass, false)
29221 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
29224 /* It's more expensive to move CR_REGS than CR0_REGS because of the
29225 shift. */
29226 else if (rclass == CR_REGS)
29227 ret = 4;
29229 /* For those processors that have slow LR/CTR moves, make them more
29230 expensive than memory in order to bias spills to memory. */
29231 else if ((rs6000_tune == PROCESSOR_POWER6
29232 || rs6000_tune == PROCESSOR_POWER7
29233 || rs6000_tune == PROCESSOR_POWER8
29234 || rs6000_tune == PROCESSOR_POWER9)
29235 && reg_class_subset_p (rclass, SPECIAL_REGS))
29236 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29238 else
29239 /* A move will cost one instruction per GPR moved. */
29240 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29243 /* Everything else has to go through GENERAL_REGS. */
29244 else
29245 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
29246 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
29248 if (TARGET_DEBUG_COST)
29250 if (dbg_cost_ctrl == 1)
29251 fprintf (stderr,
29252 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
29253 ret, GET_MODE_NAME (mode), reg_class_names[from],
29254 reg_class_names[to]);
29255 dbg_cost_ctrl--;
29258 return ret;
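/* For example, moving DFmode between a GPR and a VSX register with
   -mcpu=power9 (TARGET_DIRECT_MOVE) costs
   3 * hard_regno_nregs (FIRST_GPR_REGNO, DFmode), i.e. 3 on a 64-bit
   target, deliberately above the within-class cost of 2 so the
   allocator still prefers keeping a value in one class.  */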
29261 /* A C expression returning the cost of moving data of MODE from a register to
29262 or from memory. */
29264 static int
29265 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
29266 bool in ATTRIBUTE_UNUSED)
29268 int ret;
29270 if (TARGET_DEBUG_COST)
29271 dbg_cost_ctrl++;
29273 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
29274 ret = 4 * hard_regno_nregs (0, mode);
29275 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
29276 || reg_classes_intersect_p (rclass, VSX_REGS)))
29277 ret = 4 * hard_regno_nregs (32, mode);
29278 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
29279 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
29280 else
29281 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
29283 if (TARGET_DEBUG_COST)
29285 if (dbg_cost_ctrl == 1)
29286 fprintf (stderr,
29287 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
29288 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
29289 dbg_cost_ctrl--;
29292 return ret;
29295 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
29297 The register allocator chooses GEN_OR_VSX_REGS for the allocno
29298 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
29299 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
29300 move cost between GENERAL_REGS and VSX_REGS low.
29302 It might seem reasonable to use a union class. After all, if usage
29303 of vsr is low and gpr high, it might make sense to spill gpr to vsr
29304 rather than memory. However, in cases where register pressure of
29305 both is high, like the cactus_adm spec test, allowing
29306 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
29307 the first scheduling pass. This is partly due to an allocno of
29308 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
29309 class, which gives too high a pressure for GENERAL_REGS and too low
29310 for VSX_REGS. So, force a choice of the subclass here.
29312 The best class is also the union if GENERAL_REGS and VSX_REGS have
29313 the same cost. In that case we do use GEN_OR_VSX_REGS as the
29314 allocno class, since trying to narrow down the class by regno mode
29315 is prone to error. For example, SImode is allowed in VSX regs and
29316 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
29317 it would be wrong to choose an allocno of GENERAL_REGS based on
29318 SImode. */
29320 static reg_class_t
29321 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
29322 reg_class_t allocno_class,
29323 reg_class_t best_class)
29325 switch (allocno_class)
29327 case GEN_OR_VSX_REGS:
29328 /* best_class must be a subset of allocno_class. */
29329 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
29330 || best_class == GEN_OR_FLOAT_REGS
29331 || best_class == VSX_REGS
29332 || best_class == ALTIVEC_REGS
29333 || best_class == FLOAT_REGS
29334 || best_class == GENERAL_REGS
29335 || best_class == BASE_REGS);
29336 /* Use best_class but choose wider classes when copying from the
29337 wider class to best_class is cheap. This mimics IRA choice
29338 of allocno class. */
29339 if (best_class == BASE_REGS)
29340 return GENERAL_REGS;
29341 if (TARGET_VSX
29342 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
29343 return VSX_REGS;
29344 return best_class;
29346 default:
29347 break;
29350 return allocno_class;
29353 /* Returns the decl of a target-specific builtin that implements the
29354 reciprocal of the function, or NULL_TREE if not available. */
29356 static tree
29357 rs6000_builtin_reciprocal (tree fndecl)
29359 switch (DECL_FUNCTION_CODE (fndecl))
29361 case VSX_BUILTIN_XVSQRTDP:
29362 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
29363 return NULL_TREE;
29365 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
29367 case VSX_BUILTIN_XVSQRTSP:
29368 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
29369 return NULL_TREE;
29371 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
29373 default:
29374 return NULL_TREE;
29378 /* Load up a constant. If the mode is a vector mode, splat the value across
29379 all of the vector elements. */
29381 static rtx
29382 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
29384 rtx reg;
29386 if (mode == SFmode || mode == DFmode)
29388 rtx d = const_double_from_real_value (dconst, mode);
29389 reg = force_reg (mode, d);
29391 else if (mode == V4SFmode)
29393 rtx d = const_double_from_real_value (dconst, SFmode);
29394 rtvec v = gen_rtvec (4, d, d, d, d);
29395 reg = gen_reg_rtx (mode);
29396 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29398 else if (mode == V2DFmode)
29400 rtx d = const_double_from_real_value (dconst, DFmode);
29401 rtvec v = gen_rtvec (2, d, d);
29402 reg = gen_reg_rtx (mode);
29403 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29405 else
29406 gcc_unreachable ();
29408 return reg;
29411 /* Generate an FMA instruction. */
29413 static void
29414 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
29416 machine_mode mode = GET_MODE (target);
29417 rtx dst;
29419 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
29420 gcc_assert (dst != NULL);
29422 if (dst != target)
29423 emit_move_insn (target, dst);
29426 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
29428 static void
29429 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
29431 machine_mode mode = GET_MODE (dst);
29432 rtx r;
29434 /* This is a tad more complicated, since the fnma_optab is for
29435 a different expression: fma(-m1, m2, a), which is the same
29436 thing except in the case of signed zeros.
29438 Fortunately we know that if FMA is supported, then FNMSUB is
29439 also supported in the ISA. Just expand it directly. */
29441 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
29443 r = gen_rtx_NEG (mode, a);
29444 r = gen_rtx_FMA (mode, m1, m2, r);
29445 r = gen_rtx_NEG (mode, r);
29446 emit_insn (gen_rtx_SET (dst, r));
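/* The signed-zero case: if m1 * m2 == a exactly, then in the default
   rounding mode fma (-m1, m2, a) is +0.0 while -fma (m1, m2, -a) is
   -0.0, so expanding through fnma_optab would give the wrong sign of
   zero; emitting the FMA rtl directly avoids that.  */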
29449 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
29450 add a reg_note saying that this was a division. Support both scalar and
29451 vector divide. Assumes no trapping math and finite arguments. */
29453 void
29454 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
29456 machine_mode mode = GET_MODE (dst);
29457 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
29458 int i;
29460 /* Low precision estimates guarantee 5 bits of accuracy. High
29461 precision estimates guarantee 14 bits of accuracy. SFmode
29462 requires 23 bits of accuracy. DFmode requires 52 bits of
29463 accuracy. Each pass at least doubles the accuracy, leading
29464 to the following. */
29465 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29466 if (mode == DFmode || mode == V2DFmode)
29467 passes++;
29469 enum insn_code code = optab_handler (smul_optab, mode);
29470 insn_gen_fn gen_mul = GEN_FCN (code);
29472 gcc_assert (code != CODE_FOR_nothing);
29474 one = rs6000_load_constant_and_splat (mode, dconst1);
29476 /* x0 = 1./d estimate */
29477 x0 = gen_reg_rtx (mode);
29478 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
29479 UNSPEC_FRES)));
29481 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
29482 if (passes > 1) {
29484 /* e0 = 1. - d * x0 */
29485 e0 = gen_reg_rtx (mode);
29486 rs6000_emit_nmsub (e0, d, x0, one);
29488 /* x1 = x0 + e0 * x0 */
29489 x1 = gen_reg_rtx (mode);
29490 rs6000_emit_madd (x1, e0, x0, x0);
29492 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
29493 ++i, xprev = xnext, eprev = enext) {
29495 /* enext = eprev * eprev */
29496 enext = gen_reg_rtx (mode);
29497 emit_insn (gen_mul (enext, eprev, eprev));
29499 /* xnext = xprev + enext * xprev */
29500 xnext = gen_reg_rtx (mode);
29501 rs6000_emit_madd (xnext, enext, xprev, xprev);
29504 } else
29505 xprev = x0;
29507 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
29509 /* u = n * xprev */
29510 u = gen_reg_rtx (mode);
29511 emit_insn (gen_mul (u, n, xprev));
29513 /* v = n - (d * u) */
29514 v = gen_reg_rtx (mode);
29515 rs6000_emit_nmsub (v, d, u, n);
29517 /* dst = (v * xprev) + u */
29518 rs6000_emit_madd (dst, v, xprev, u);
29520 if (note_p)
29521 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
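/* Illustrative sketch, not part of rs6000.c: the same Newton-Raphson
   scheme in scalar C, assuming passes >= 2 (the expander above
   short-circuits the single-pass case with xprev = x0) and using a
   crude low-precision reciprocal as a stand-in for the FRES estimate.
   Each pass roughly doubles the number of correct bits.  */
static double
example_swdiv (double n, double d, int passes)
{
  double x = (float) (1.0f / (float) d);  /* stand-in for UNSPEC_FRES */
  double e = 1.0 - d * x;	/* e0 = 1 - d*x0 */
  x = x + e * x;		/* x1 = x0 + e0*x0 */
  for (int i = 0; i < passes - 2; i++)
    {
      e = e * e;		/* enext = eprev * eprev */
      x = x + e * x;		/* xnext = xprev + enext*xprev */
    }
  double u = n * x;		/* u = n * xprev */
  double v = n - d * u;		/* v = n - d*u */
  return v * x + u;		/* dst = v*xprev + u ~= n/d */
}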
29524 /* Goldschmidt's Algorithm for single/double-precision floating point
29525 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
29527 void
29528 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
29530 machine_mode mode = GET_MODE (src);
29531 rtx e = gen_reg_rtx (mode);
29532 rtx g = gen_reg_rtx (mode);
29533 rtx h = gen_reg_rtx (mode);
29535 /* Low precision estimates guarantee 5 bits of accuracy. High
29536 precision estimates guarantee 14 bits of accuracy. SFmode
29537 requires 23 bits of accuracy. DFmode requires 52 bits of
29538 accuracy. Each pass at least doubles the accuracy, leading
29539 to the following. */
29540 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29541 if (mode == DFmode || mode == V2DFmode)
29542 passes++;
29544 int i;
29545 rtx mhalf;
29546 enum insn_code code = optab_handler (smul_optab, mode);
29547 insn_gen_fn gen_mul = GEN_FCN (code);
29549 gcc_assert (code != CODE_FOR_nothing);
29551 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
29553 /* e = rsqrt estimate */
29554 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
29555 UNSPEC_RSQRT)));
29557 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
29558 if (!recip)
29560 rtx zero = force_reg (mode, CONST0_RTX (mode));
29562 if (mode == SFmode)
29564 rtx target = emit_conditional_move (e, GT, src, zero, mode,
29565 e, zero, mode, 0);
29566 if (target != e)
29567 emit_move_insn (e, target);
29569 else
29571 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
29572 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
29576 /* g = sqrt estimate. */
29577 emit_insn (gen_mul (g, e, src));
29578 /* h = 1/(2*sqrt) estimate. */
29579 emit_insn (gen_mul (h, e, mhalf));
29581 if (recip)
29583 if (passes == 1)
29585 rtx t = gen_reg_rtx (mode);
29586 rs6000_emit_nmsub (t, g, h, mhalf);
29588 /* Apply the correction directly to the rsqrt estimate. */
29588 rs6000_emit_madd (dst, e, t, e);
29590 else
29592 for (i = 0; i < passes; i++)
29594 rtx t1 = gen_reg_rtx (mode);
29595 rtx g1 = gen_reg_rtx (mode);
29596 rtx h1 = gen_reg_rtx (mode);
29598 rs6000_emit_nmsub (t1, g, h, mhalf);
29599 rs6000_emit_madd (g1, g, t1, g);
29600 rs6000_emit_madd (h1, h, t1, h);
29602 g = g1;
29603 h = h1;
29606 /* Multiply h by 2 to form the rsqrt result. */
29606 emit_insn (gen_add3_insn (dst, h, h));
29609 else
29611 rtx t = gen_reg_rtx (mode);
29612 rs6000_emit_nmsub (t, g, h, mhalf);
29613 rs6000_emit_madd (dst, g, t, g);
29616 return;
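/* Illustrative sketch, not part of rs6000.c: the multi-pass shape of
   Goldschmidt's iteration in scalar C, assuming src > 0 and finite and
   using the libm result as a stand-in for the UNSPEC_RSQRT estimate.
   g converges to sqrt(src) and h to 1/(2*sqrt(src)); the expander
   above additionally special-cases passes == 1 and filters src == 0.  */
#include <math.h>

static double
example_swsqrt (double src, int passes, int recip)
{
  double e = 1.0 / sqrt (src);	/* stand-in for the rsqrt estimate */
  double g = e * src;		/* g ~= sqrt(src) */
  double h = e * 0.5;		/* h ~= 1/(2*sqrt(src)) */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;	/* residual, cf. rs6000_emit_nmsub */
      g = g + t * g;		/* refine the sqrt estimate */
      h = h + t * h;		/* refine the 1/(2*sqrt) estimate */
    }
  return recip ? h + h : g;	/* 2*h is rsqrt; g is sqrt */
}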
29619 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
29620 (Power7) targets. DST is the target, and SRC is the argument operand. */
29622 void
29623 rs6000_emit_popcount (rtx dst, rtx src)
29625 machine_mode mode = GET_MODE (dst);
29626 rtx tmp1, tmp2;
29628 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
29629 if (TARGET_POPCNTD)
29631 if (mode == SImode)
29632 emit_insn (gen_popcntdsi2 (dst, src));
29633 else
29634 emit_insn (gen_popcntddi2 (dst, src));
29635 return;
29638 tmp1 = gen_reg_rtx (mode);
29640 if (mode == SImode)
29642 emit_insn (gen_popcntbsi2 (tmp1, src));
29643 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
29644 NULL_RTX, 0);
29645 tmp2 = force_reg (SImode, tmp2);
29646 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
29648 else
29650 emit_insn (gen_popcntbdi2 (tmp1, src));
29651 tmp2 = expand_mult (DImode, tmp1,
29652 GEN_INT ((HOST_WIDE_INT)
29653 0x01010101 << 32 | 0x01010101),
29654 NULL_RTX, 0);
29655 tmp2 = force_reg (DImode, tmp2);
29656 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
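/* Illustrative sketch, not part of rs6000.c: the multiply-and-shift
   trick used by the SImode path above, in portable C.  The first three
   steps are a portable stand-in for popcntb, which leaves the popcount
   of each byte in that byte; multiplying by 0x01010101 then sums all
   four byte counts into the most significant byte (the total is at
   most 32, so no carry is lost), and the shift by 24 extracts it.  */
#include <stdint.h>

static uint32_t
example_popcount32 (uint32_t x)
{
  uint32_t b = x - ((x >> 1) & 0x55555555u);	    /* 2-bit counts */
  b = (b & 0x33333333u) + ((b >> 2) & 0x33333333u); /* 4-bit counts */
  b = (b + (b >> 4)) & 0x0f0f0f0fu;		    /* per-byte counts */
  return (b * 0x01010101u) >> 24;	/* the expander's mult + shift */
}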
29661 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
29662 target, and SRC is the argument operand. */
29664 void
29665 rs6000_emit_parity (rtx dst, rtx src)
29667 machine_mode mode = GET_MODE (dst);
29668 rtx tmp;
29670 tmp = gen_reg_rtx (mode);
29672 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
29673 if (TARGET_CMPB)
29675 if (mode == SImode)
29677 emit_insn (gen_popcntbsi2 (tmp, src));
29678 emit_insn (gen_paritysi2_cmpb (dst, tmp));
29680 else
29682 emit_insn (gen_popcntbdi2 (tmp, src));
29683 emit_insn (gen_paritydi2_cmpb (dst, tmp));
29685 return;
29688 if (mode == SImode)
29690 /* Is mult+shift >= shift+xor+shift+xor? */
29691 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
29693 rtx tmp1, tmp2, tmp3, tmp4;
29695 tmp1 = gen_reg_rtx (SImode);
29696 emit_insn (gen_popcntbsi2 (tmp1, src));
29698 tmp2 = gen_reg_rtx (SImode);
29699 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
29700 tmp3 = gen_reg_rtx (SImode);
29701 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
29703 tmp4 = gen_reg_rtx (SImode);
29704 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
29705 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
29707 else
29708 rs6000_emit_popcount (tmp, src);
29709 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
29711 else
29713 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
29714 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
29716 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
29718 tmp1 = gen_reg_rtx (DImode);
29719 emit_insn (gen_popcntbdi2 (tmp1, src));
29721 tmp2 = gen_reg_rtx (DImode);
29722 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
29723 tmp3 = gen_reg_rtx (DImode);
29724 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
29726 tmp4 = gen_reg_rtx (DImode);
29727 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
29728 tmp5 = gen_reg_rtx (DImode);
29729 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
29731 tmp6 = gen_reg_rtx (DImode);
29732 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
29733 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
29735 else
29736 rs6000_emit_popcount (tmp, src);
29737 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
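/* Illustrative sketch, not part of rs6000.c: the DImode shift+xor
   fallback above, in portable C.  Bit 0 of a sum equals the XOR of the
   addends' bit 0s, so xor-folding the per-byte popcounts and masking
   with 1 yields the parity of the whole doubleword.  */
#include <stdint.h>

static uint64_t
example_parity64 (uint64_t x)
{
  /* Per-byte popcount, a portable stand-in for popcntb.  */
  uint64_t b = x - ((x >> 1) & 0x5555555555555555ull);
  b = (b & 0x3333333333333333ull) + ((b >> 2) & 0x3333333333333333ull);
  b = (b + (b >> 4)) & 0x0f0f0f0f0f0f0f0full;
  /* The shift+xor+shift+xor+shift+xor sequence from the expander.  */
  b ^= b >> 32;
  b ^= b >> 16;
  b ^= b >> 8;
  return b & 1;
}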
29741 /* Expand an Altivec constant permutation for little endian mode.
29742 OP0 and OP1 are the input vectors and TARGET is the output vector.
29743 SEL specifies the constant permutation vector.
29745 There are two issues: First, the two input operands must be
29746 swapped so that together they form a double-wide array in LE
29747 order. Second, the vperm instruction has surprising behavior
29748 in LE mode: it interprets the elements of the source vectors
29749 in BE mode ("left to right") and interprets the elements of
29750 the destination vector in LE mode ("right to left"). To
29751 correct for this, we must subtract each element of the permute
29752 control vector from 31.
29754 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
29755 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
29756 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
29757 serve as the permute control vector. Then, in BE mode,
29759 vperm 9,10,11,12
29761 places the desired result in vr9. However, in LE mode the
29762 vector contents will be
29764 vr10 = 00000003 00000002 00000001 00000000
29765 vr11 = 00000007 00000006 00000005 00000004
29767 The result of the vperm using the same permute control vector is
29769 vr9 = 05000000 07000000 01000000 03000000
29771 That is, the leftmost 4 bytes of vr10 are interpreted as the
29772 source for the rightmost 4 bytes of vr9, and so on.
29774 If we change the permute control vector to
29776 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
29778 and issue
29780 vperm 9,11,10,12
29782 we get the desired
29784 vr9 = 00000006 00000004 00000002 00000000. */
29786 static void
29787 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
29788 const vec_perm_indices &sel)
29790 unsigned int i;
29791 rtx perm[16];
29792 rtx constv, unspec;
29794 /* Unpack and adjust the constant selector. */
29795 for (i = 0; i < 16; ++i)
29797 unsigned int elt = 31 - (sel[i] & 31);
29798 perm[i] = GEN_INT (elt);
29801 /* Expand to a permute, swapping the inputs and using the
29802 adjusted selector. */
29803 if (!REG_P (op0))
29804 op0 = force_reg (V16QImode, op0);
29805 if (!REG_P (op1))
29806 op1 = force_reg (V16QImode, op1);
29808 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
29809 constv = force_reg (V16QImode, constv);
29810 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
29811 UNSPEC_VPERM);
29812 if (!REG_P (target))
29814 rtx tmp = gen_reg_rtx (V16QImode);
29815 emit_move_insn (tmp, unspec);
29816 unspec = tmp;
29819 emit_move_insn (target, unspec);
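/* Illustrative sketch, not part of rs6000.c: the selector adjustment
   performed above, on plain arrays.  Mirroring each index within the
   32-byte concatenation (31 - elt) and swapping the two inputs makes a
   BE-numbered vperm produce the LE-expected result, per the long
   comment preceding the function.  */
static void
example_adjust_selector_le (const unsigned char sel[16],
			    unsigned char perm[16])
{
  for (int i = 0; i < 16; i++)
    perm[i] = 31 - (sel[i] & 31);
}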
29822 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
29823 permute control vector. But here it's not a constant, so we must
29824 generate a vector NAND or NOR to do the adjustment. */
29826 void
29827 altivec_expand_vec_perm_le (rtx operands[4])
29829 rtx notx, iorx, unspec;
29830 rtx target = operands[0];
29831 rtx op0 = operands[1];
29832 rtx op1 = operands[2];
29833 rtx sel = operands[3];
29834 rtx tmp = target;
29835 rtx norreg = gen_reg_rtx (V16QImode);
29836 machine_mode mode = GET_MODE (target);
29838 /* Get everything in regs so the pattern matches. */
29839 if (!REG_P (op0))
29840 op0 = force_reg (mode, op0);
29841 if (!REG_P (op1))
29842 op1 = force_reg (mode, op1);
29843 if (!REG_P (sel))
29844 sel = force_reg (V16QImode, sel);
29845 if (!REG_P (target))
29846 tmp = gen_reg_rtx (mode);
29848 if (TARGET_P9_VECTOR)
29850 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
29851 UNSPEC_VPERMR);
29853 else
29855 /* Invert the selector with a VNAND if available, else a VNOR.
29856 The VNAND is preferred for future fusion opportunities. */
29857 notx = gen_rtx_NOT (V16QImode, sel);
29858 iorx = (TARGET_P8_VECTOR
29859 ? gen_rtx_IOR (V16QImode, notx, notx)
29860 : gen_rtx_AND (V16QImode, notx, notx));
29861 emit_insn (gen_rtx_SET (norreg, iorx));
29863 /* Permute with operands reversed and adjusted selector. */
29864 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
29865 UNSPEC_VPERM);
29868 /* Copy into target, possibly by way of a register. */
29869 if (!REG_P (target))
29871 emit_move_insn (tmp, unspec);
29872 unspec = tmp;
29875 emit_move_insn (target, unspec);
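/* Illustrative sketch, not part of rs6000.c: why a plain vector NOT
   (formed above as a NAND or NOR of the selector with itself) is
   enough at run time where the constant case computed 31 - elt.  vperm
   reads only the low 5 bits of each selector byte, and (~x) & 31
   equals (31 - x) & 31 for any x, so inversion does the same mirroring.  */
#include <stdint.h>

static uint8_t
example_adjust_selector_byte (uint8_t sel)
{
  return (uint8_t) ~sel & 31;	/* same low 5 bits as 31 - (sel & 31) */
}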
29878 /* Expand an Altivec constant permutation. Return true if we match
29879 an efficient implementation; false to fall back to VPERM.
29881 OP0 and OP1 are the input vectors and TARGET is the output vector.
29882 SEL specifies the constant permutation vector. */
29884 static bool
29885 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
29886 const vec_perm_indices &sel)
29888 struct altivec_perm_insn {
29889 HOST_WIDE_INT mask;
29890 enum insn_code impl;
29891 unsigned char perm[16];
29893 static const struct altivec_perm_insn patterns[] = {
29894 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
29895 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
29896 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
29897 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
29898 { OPTION_MASK_ALTIVEC,
29899 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
29900 : CODE_FOR_altivec_vmrglb_direct),
29901 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
29902 { OPTION_MASK_ALTIVEC,
29903 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
29904 : CODE_FOR_altivec_vmrglh_direct),
29905 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
29906 { OPTION_MASK_ALTIVEC,
29907 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
29908 : CODE_FOR_altivec_vmrglw_direct),
29909 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
29910 { OPTION_MASK_ALTIVEC,
29911 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
29912 : CODE_FOR_altivec_vmrghb_direct),
29913 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
29914 { OPTION_MASK_ALTIVEC,
29915 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
29916 : CODE_FOR_altivec_vmrghh_direct),
29917 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
29918 { OPTION_MASK_ALTIVEC,
29919 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
29920 : CODE_FOR_altivec_vmrghw_direct),
29921 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
29922 { OPTION_MASK_P8_VECTOR,
29923 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
29924 : CODE_FOR_p8_vmrgow_v4sf_direct),
29925 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
29926 { OPTION_MASK_P8_VECTOR,
29927 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
29928 : CODE_FOR_p8_vmrgew_v4sf_direct),
29929 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
29932 unsigned int i, j, elt, which;
29933 unsigned char perm[16];
29934 rtx x;
29935 bool one_vec;
29937 /* Unpack the constant selector. */
29938 for (i = which = 0; i < 16; ++i)
29940 elt = sel[i] & 31;
29941 which |= (elt < 16 ? 1 : 2);
29942 perm[i] = elt;
29945 /* Simplify the constant selector based on operands. */
29946 switch (which)
29948 default:
29949 gcc_unreachable ();
29951 case 3:
29952 one_vec = false;
29953 if (!rtx_equal_p (op0, op1))
29954 break;
29955 /* FALLTHRU */
29957 case 2:
29958 for (i = 0; i < 16; ++i)
29959 perm[i] &= 15;
29960 op0 = op1;
29961 one_vec = true;
29962 break;
29964 case 1:
29965 op1 = op0;
29966 one_vec = true;
29967 break;
29970 /* Look for splat patterns. */
29971 if (one_vec)
29973 elt = perm[0];
29975 for (i = 0; i < 16; ++i)
29976 if (perm[i] != elt)
29977 break;
29978 if (i == 16)
29980 if (!BYTES_BIG_ENDIAN)
29981 elt = 15 - elt;
29982 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
29983 return true;
29986 if (elt % 2 == 0)
29988 for (i = 0; i < 16; i += 2)
29989 if (perm[i] != elt || perm[i + 1] != elt + 1)
29990 break;
29991 if (i == 16)
29993 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
29994 x = gen_reg_rtx (V8HImode);
29995 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
29996 GEN_INT (field)));
29997 emit_move_insn (target, gen_lowpart (V16QImode, x));
29998 return true;
30002 if (elt % 4 == 0)
30004 for (i = 0; i < 16; i += 4)
30005 if (perm[i] != elt
30006 || perm[i + 1] != elt + 1
30007 || perm[i + 2] != elt + 2
30008 || perm[i + 3] != elt + 3)
30009 break;
30010 if (i == 16)
30012 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30013 x = gen_reg_rtx (V4SImode);
30014 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30015 GEN_INT (field)));
30016 emit_move_insn (target, gen_lowpart (V16QImode, x));
30017 return true;
30022 /* Look for merge and pack patterns. */
30023 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30025 bool swapped;
30027 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30028 continue;
30030 elt = patterns[j].perm[0];
30031 if (perm[0] == elt)
30032 swapped = false;
30033 else if (perm[0] == elt + 16)
30034 swapped = true;
30035 else
30036 continue;
30037 for (i = 1; i < 16; ++i)
30039 elt = patterns[j].perm[i];
30040 if (swapped)
30041 elt = (elt >= 16 ? elt - 16 : elt + 16);
30042 else if (one_vec && elt >= 16)
30043 elt -= 16;
30044 if (perm[i] != elt)
30045 break;
30047 if (i == 16)
30049 enum insn_code icode = patterns[j].impl;
30050 machine_mode omode = insn_data[icode].operand[0].mode;
30051 machine_mode imode = insn_data[icode].operand[1].mode;
30053 /* For little-endian, don't use vpkuwum and vpkuhum if the
30054 underlying vector type is not V4SI and V8HI, respectively.
30055 For example, using vpkuwum with a V8HI picks up the even
30056 halfwords (BE numbering) when the even halfwords (LE
30057 numbering) are what we need. */
30058 if (!BYTES_BIG_ENDIAN
30059 && icode == CODE_FOR_altivec_vpkuwum_direct
30060 && ((REG_P (op0)
30061 && GET_MODE (op0) != V4SImode)
30062 || (SUBREG_P (op0)
30063 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30064 continue;
30065 if (!BYTES_BIG_ENDIAN
30066 && icode == CODE_FOR_altivec_vpkuhum_direct
30067 && ((REG_P (op0)
30068 && GET_MODE (op0) != V8HImode)
30069 || (SUBREG_P (op0)
30070 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30071 continue;
30073 /* For little-endian, the two input operands must be swapped
30074 (or swapped back) to ensure proper right-to-left numbering
30075 from 0 to 2N-1. */
30076 if (swapped ^ !BYTES_BIG_ENDIAN)
30077 std::swap (op0, op1);
30078 if (imode != V16QImode)
30080 op0 = gen_lowpart (imode, op0);
30081 op1 = gen_lowpart (imode, op1);
30083 if (omode == V16QImode)
30084 x = target;
30085 else
30086 x = gen_reg_rtx (omode);
30087 emit_insn (GEN_FCN (icode) (x, op0, op1));
30088 if (omode != V16QImode)
30089 emit_move_insn (target, gen_lowpart (V16QImode, x));
30090 return true;
30094 if (!BYTES_BIG_ENDIAN)
30096 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
30097 return true;
30100 return false;
30103 /* Expand a VSX Permute Doubleword constant permutation.
30104 Return true if we match an efficient implementation. */
30106 static bool
30107 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30108 unsigned char perm0, unsigned char perm1)
30110 rtx x;
30112 /* If both selectors come from the same operand, fold to single op. */
30113 if ((perm0 & 2) == (perm1 & 2))
30115 if (perm0 & 2)
30116 op0 = op1;
30117 else
30118 op1 = op0;
30120 /* If both operands are equal, fold to simpler permutation. */
30121 if (rtx_equal_p (op0, op1))
30123 perm0 = perm0 & 1;
30124 perm1 = (perm1 & 1) + 2;
30126 /* If the first selector comes from the second operand, swap. */
30127 else if (perm0 & 2)
30129 if (perm1 & 2)
30130 return false;
30131 perm0 -= 2;
30132 perm1 += 2;
30133 std::swap (op0, op1);
30135 /* If the second selector does not come from the second operand, fail. */
30136 else if ((perm1 & 2) == 0)
30137 return false;
30139 /* Success! */
30140 if (target != NULL)
30142 machine_mode vmode, dmode;
30143 rtvec v;
30145 vmode = GET_MODE (target);
30146 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
30147 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
30148 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
30149 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
30150 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
30151 emit_insn (gen_rtx_SET (target, x));
30153 return true;
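/* Illustrative sketch, not part of rs6000.c: the doubleword-select
   semantics the function above matches, on plain arrays.  After the
   canonicalization steps, perm0 and perm1 each pick one of the four
   doublewords of the {op0, op1} concatenation, which is what the
   generated VEC_CONCAT/VEC_SELECT pair (a single xxpermdi) expresses.  */
static void
example_permdi (const double op0[2], const double op1[2],
		unsigned perm0, unsigned perm1, double out[2])
{
  const double cat[4] = { op0[0], op0[1], op1[0], op1[1] };
  out[0] = cat[perm0 & 3];	/* 0-1 index op0, 2-3 index op1 */
  out[1] = cat[perm1 & 3];
}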
30156 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
30158 static bool
30159 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
30160 rtx op1, const vec_perm_indices &sel)
30162 bool testing_p = !target;
30164 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
30165 if (TARGET_ALTIVEC && testing_p)
30166 return true;
30168 /* Check for ps_merge* or xxpermdi insns. */
30169 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
30171 if (testing_p)
30173 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
30174 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
30176 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
30177 return true;
30180 if (TARGET_ALTIVEC)
30182 /* Force the target-independent code to lower to V16QImode. */
30183 if (vmode != V16QImode)
30184 return false;
30185 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
30186 return true;
30189 return false;
30192 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
30193 OP0 and OP1 are the input vectors and TARGET is the output vector.
30194 PERM specifies the constant permutation vector. */
30196 static void
30197 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
30198 machine_mode vmode, const vec_perm_builder &perm)
30200 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
30201 if (x != target)
30202 emit_move_insn (target, x);
30205 /* Expand an extract even operation. */
30207 void
30208 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
30210 machine_mode vmode = GET_MODE (target);
30211 unsigned i, nelt = GET_MODE_NUNITS (vmode);
30212 vec_perm_builder perm (nelt, nelt, 1);
30214 for (i = 0; i < nelt; i++)
30215 perm.quick_push (i * 2);
30217 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
30220 /* Expand a vector interleave operation. */
30222 void
30223 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
30225 machine_mode vmode = GET_MODE (target);
30226 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
30227 vec_perm_builder perm (nelt, nelt, 1);
30229 high = (highp ? 0 : nelt / 2);
30230 for (i = 0; i < nelt / 2; i++)
30232 perm.quick_push (i + high);
30233 perm.quick_push (i + nelt + high);
30236 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
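/* Illustrative sketch, not part of rs6000.c: the selectors the two
   expanders above build, indexing the 2*nelt-element concatenation of
   op0 and op1.  For nelt = 4, extract-even yields {0, 2, 4, 6},
   interleave-high {0, 4, 1, 5}, and interleave-low {2, 6, 3, 7}.  */
static void
example_build_selectors (unsigned nelt, int highp,
			 unsigned even[], unsigned inter[])
{
  for (unsigned i = 0; i < nelt; i++)
    even[i] = i * 2;			/* rs6000_expand_extract_even */
  unsigned high = highp ? 0 : nelt / 2;
  for (unsigned i = 0; i < nelt / 2; i++)
    {
      inter[2 * i] = i + high;		  /* element from op0 */
      inter[2 * i + 1] = i + nelt + high; /* element from op1 */
    }
}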
30239 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
30240 void
30241 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
30243 HOST_WIDE_INT hwi_scale (scale);
30244 REAL_VALUE_TYPE r_pow;
30245 rtvec v = rtvec_alloc (2);
30246 rtx elt;
30247 rtx scale_vec = gen_reg_rtx (V2DFmode);
30248 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
30249 elt = const_double_from_real_value (r_pow, DFmode);
30250 RTVEC_ELT (v, 0) = elt;
30251 RTVEC_ELT (v, 1) = elt;
30252 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
30253 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
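/* Illustrative sketch, not part of rs6000.c: the scalar computation
   the expander above vectorizes, using ldexp() from <math.h> in place
   of building and multiplying by a splatted 2^scale constant.  */
#include <math.h>

static void
example_scale_v2df (double tgt[2], const double src[2], int scale)
{
  for (int i = 0; i < 2; i++)
    tgt[i] = ldexp (src[i], scale);	/* src[i] * 2^scale */
}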
30256 /* Return an RTX representing where to find the function value of a
30257 function returning MODE. */
30258 static rtx
30259 rs6000_complex_function_value (machine_mode mode)
30261 unsigned int regno;
30262 rtx r1, r2;
30263 machine_mode inner = GET_MODE_INNER (mode);
30264 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
30266 if (TARGET_FLOAT128_TYPE
30267 && (mode == KCmode
30268 || (mode == TCmode && TARGET_IEEEQUAD)))
30269 regno = ALTIVEC_ARG_RETURN;
30271 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30272 regno = FP_ARG_RETURN;
30274 else
30276 regno = GP_ARG_RETURN;
30278 /* 32-bit is OK since it'll go in r3/r4. */
30279 if (TARGET_32BIT && inner_bytes >= 4)
30280 return gen_rtx_REG (mode, regno);
30283 if (inner_bytes >= 8)
30284 return gen_rtx_REG (mode, regno);
30286 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
30287 const0_rtx);
30288 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
30289 GEN_INT (inner_bytes));
30290 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
30293 /* Return an rtx describing a return value of MODE as a PARALLEL
30294 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
30295 stride REG_STRIDE. */
30297 static rtx
30298 rs6000_parallel_return (machine_mode mode,
30299 int n_elts, machine_mode elt_mode,
30300 unsigned int regno, unsigned int reg_stride)
30302 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
30304 int i;
30305 for (i = 0; i < n_elts; i++)
30307 rtx r = gen_rtx_REG (elt_mode, regno);
30308 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
30309 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
30310 regno += reg_stride;
30313 return par;
30316 /* Target hook for TARGET_FUNCTION_VALUE.
30318 An integer value is in r3 and a floating-point value is in fp1,
30319 unless -msoft-float. */
30321 static rtx
30322 rs6000_function_value (const_tree valtype,
30323 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
30324 bool outgoing ATTRIBUTE_UNUSED)
30326 machine_mode mode;
30327 unsigned int regno;
30328 machine_mode elt_mode;
30329 int n_elts;
30331 /* Special handling for structs in darwin64. */
30332 if (TARGET_MACHO
30333 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
30335 CUMULATIVE_ARGS valcum;
30336 rtx valret;
30338 valcum.words = 0;
30339 valcum.fregno = FP_ARG_MIN_REG;
30340 valcum.vregno = ALTIVEC_ARG_MIN_REG;
30341 /* Do a trial code generation as if this were going to be passed as
30342 an argument; if any part goes in memory, we return NULL. */
30343 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
30344 if (valret)
30345 return valret;
30346 /* Otherwise fall through to standard ABI rules. */
30349 mode = TYPE_MODE (valtype);
30351 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
30352 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
30354 int first_reg, n_regs;
30356 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
30358 /* _Decimal128 must use even/odd register pairs. */
30359 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30360 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
30362 else
30364 first_reg = ALTIVEC_ARG_RETURN;
30365 n_regs = 1;
30368 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
30371 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
30372 if (TARGET_32BIT && TARGET_POWERPC64)
30373 switch (mode)
30375 default:
30376 break;
30377 case E_DImode:
30378 case E_SCmode:
30379 case E_DCmode:
30380 case E_TCmode:
30381 int count = GET_MODE_SIZE (mode) / 4;
30382 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
30385 if ((INTEGRAL_TYPE_P (valtype)
30386 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
30387 || POINTER_TYPE_P (valtype))
30388 mode = TARGET_32BIT ? SImode : DImode;
30390 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30391 /* _Decimal128 must use an even/odd register pair. */
30392 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30393 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
30394 && !FLOAT128_VECTOR_P (mode))
30395 regno = FP_ARG_RETURN;
30396 else if (TREE_CODE (valtype) == COMPLEX_TYPE
30397 && targetm.calls.split_complex_arg)
30398 return rs6000_complex_function_value (mode);
30399 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30400 return register is used in both cases, and we won't see V2DImode/V2DFmode
30401 for pure altivec, combine the two cases. */
30402 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
30403 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
30404 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
30405 regno = ALTIVEC_ARG_RETURN;
30406 else
30407 regno = GP_ARG_RETURN;
30409 return gen_rtx_REG (mode, regno);
30412 /* Define how to find the value returned by a library function
30413 assuming the value has mode MODE. */
30414 rtx
30415 rs6000_libcall_value (machine_mode mode)
30417 unsigned int regno;
30419 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
30420 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
30421 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
30423 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30424 /* _Decimal128 must use an even/odd register pair. */
30425 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30426 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
30427 regno = FP_ARG_RETURN;
30428 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30429 return register is used in both cases, and we won't see V2DImode/V2DFmode
30430 for pure altivec, combine the two cases. */
30431 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
30432 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
30433 regno = ALTIVEC_ARG_RETURN;
30434 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
30435 return rs6000_complex_function_value (mode);
30436 else
30437 regno = GP_ARG_RETURN;
30439 return gen_rtx_REG (mode, regno);
30442 /* Compute register pressure classes. We implement the target hook to avoid
30443 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
30444 lead to incorrect estimates of the number of available registers and therefore
30445 increased register pressure/spill. */
30446 static int
30447 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
30449 int n;
30451 n = 0;
30452 pressure_classes[n++] = GENERAL_REGS;
30453 if (TARGET_VSX)
30454 pressure_classes[n++] = VSX_REGS;
30455 else
30457 if (TARGET_ALTIVEC)
30458 pressure_classes[n++] = ALTIVEC_REGS;
30459 if (TARGET_HARD_FLOAT)
30460 pressure_classes[n++] = FLOAT_REGS;
30462 pressure_classes[n++] = CR_REGS;
30463 pressure_classes[n++] = SPECIAL_REGS;
30465 return n;
30468 /* Given FROM and TO register numbers, say whether this elimination is allowed.
30469 Frame pointer elimination is automatically handled.
30471 For the RS/6000, if frame pointer elimination is being done, we would like
30472 to convert ap into fp, not sp.
30474 We need r30 if -mminimal-toc was specified, and there are constant pool
30475 references. */
30477 static bool
30478 rs6000_can_eliminate (const int from, const int to)
30480 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
30481 ? ! frame_pointer_needed
30482 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
30483 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
30484 || constant_pool_empty_p ()
30485 : true);
30488 /* Define the offset between two registers, FROM to be eliminated and its
30489 replacement TO, at the start of a routine. */
30490 HOST_WIDE_INT
30491 rs6000_initial_elimination_offset (int from, int to)
30493 rs6000_stack_t *info = rs6000_stack_info ();
30494 HOST_WIDE_INT offset;
30496 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30497 offset = info->push_p ? 0 : -info->total_size;
30498 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30500 offset = info->push_p ? 0 : -info->total_size;
30501 if (FRAME_GROWS_DOWNWARD)
30502 offset += info->fixed_size + info->vars_size + info->parm_size;
30504 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30505 offset = FRAME_GROWS_DOWNWARD
30506 ? info->fixed_size + info->vars_size + info->parm_size
30507 : 0;
30508 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30509 offset = info->total_size;
30510 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30511 offset = info->push_p ? info->total_size : 0;
30512 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
30513 offset = 0;
30514 else
30515 gcc_unreachable ();
30517 return offset;
30520 /* Fill in sizes of registers used by unwinder. */
30522 static void
30523 rs6000_init_dwarf_reg_sizes_extra (tree address)
30525 if (TARGET_MACHO && ! TARGET_ALTIVEC)
30527 int i;
30528 machine_mode mode = TYPE_MODE (char_type_node);
30529 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
30530 rtx mem = gen_rtx_MEM (BLKmode, addr);
30531 rtx value = gen_int_mode (16, mode);
30533 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
30534 The unwinder still needs to know the size of Altivec registers. */
30536 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
30538 int column = DWARF_REG_TO_UNWIND_COLUMN
30539 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
30540 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
30542 emit_move_insn (adjust_address (mem, mode, offset), value);
30547 /* Map internal gcc register numbers to debug format register numbers.
30548 FORMAT specifies the type of debug register number to use:
30549 0 -- debug information, except for frame-related sections
30550 1 -- DWARF .debug_frame section
30551 2 -- DWARF .eh_frame section */
30553 unsigned int
30554 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
30556 /* On some platforms, we use the standard DWARF register
30557 numbering for .debug_info and .debug_frame. */
30558 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
30560 #ifdef RS6000_USE_DWARF_NUMBERING
30561 if (regno <= 31)
30562 return regno;
30563 if (FP_REGNO_P (regno))
30564 return regno - FIRST_FPR_REGNO + 32;
30565 if (ALTIVEC_REGNO_P (regno))
30566 return regno - FIRST_ALTIVEC_REGNO + 1124;
30567 if (regno == LR_REGNO)
30568 return 108;
30569 if (regno == CTR_REGNO)
30570 return 109;
30571 if (regno == CA_REGNO)
30572 return 101; /* XER */
30573 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
30574 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
30575 The actual code emitted saves the whole of CR, so we map CR2_REGNO
30576 to the DWARF reg for CR. */
30577 if (format == 1 && regno == CR2_REGNO)
30578 return 64;
30579 if (CR_REGNO_P (regno))
30580 return regno - CR0_REGNO + 86;
30581 if (regno == VRSAVE_REGNO)
30582 return 356;
30583 if (regno == VSCR_REGNO)
30584 return 67;
30586 /* These do not make much sense. */
30587 if (regno == FRAME_POINTER_REGNUM)
30588 return 111;
30589 if (regno == ARG_POINTER_REGNUM)
30590 return 67;
30591 if (regno == 64)
30592 return 100;
30594 gcc_unreachable ();
30595 #endif
30598 /* We use the GCC 7 (and before) internal number for non-DWARF debug
30599 information, and also for .eh_frame. */
30600 /* Translate the regnos to their numbers in GCC 7 (and before). */
30601 if (regno <= 31)
30602 return regno;
30603 if (FP_REGNO_P (regno))
30604 return regno - FIRST_FPR_REGNO + 32;
30605 if (ALTIVEC_REGNO_P (regno))
30606 return regno - FIRST_ALTIVEC_REGNO + 77;
30607 if (regno == LR_REGNO)
30608 return 65;
30609 if (regno == CTR_REGNO)
30610 return 66;
30611 if (regno == CA_REGNO)
30612 return 76; /* XER */
30613 if (CR_REGNO_P (regno))
30614 return regno - CR0_REGNO + 68;
30615 if (regno == VRSAVE_REGNO)
30616 return 109;
30617 if (regno == VSCR_REGNO)
30618 return 110;
30620 if (regno == FRAME_POINTER_REGNUM)
30621 return 111;
30622 if (regno == ARG_POINTER_REGNUM)
30623 return 67;
30624 if (regno == 64)
30625 return 64;
30627 gcc_unreachable ();
30630 /* target hook eh_return_filter_mode */
30631 static scalar_int_mode
30632 rs6000_eh_return_filter_mode (void)
30634 return TARGET_32BIT ? SImode : word_mode;
30637 /* Target hook for translate_mode_attribute. */
30638 static machine_mode
30639 rs6000_translate_mode_attribute (machine_mode mode)
30641 if ((FLOAT128_IEEE_P (mode)
30642 && ieee128_float_type_node == long_double_type_node)
30643 || (FLOAT128_IBM_P (mode)
30644 && ibm128_float_type_node == long_double_type_node))
30645 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
30646 return mode;
30649 /* Target hook for scalar_mode_supported_p. */
30650 static bool
30651 rs6000_scalar_mode_supported_p (scalar_mode mode)
30653 /* -m32 does not support TImode. This is the default, from
30654 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
30655 same ABI as for -m32. But default_scalar_mode_supported_p allows
30656 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
30657 for -mpowerpc64. */
30658 if (TARGET_32BIT && mode == TImode)
30659 return false;
30661 if (DECIMAL_FLOAT_MODE_P (mode))
30662 return default_decimal_float_supported_p ();
30663 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
30664 return true;
30665 else
30666 return default_scalar_mode_supported_p (mode);
30669 /* Target hook for vector_mode_supported_p. */
30670 static bool
30671 rs6000_vector_mode_supported_p (machine_mode mode)
30673 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
30674 128-bit, the compiler might try to widen IEEE 128-bit to IBM
30675 double-double. */
30676 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
30677 return true;
30679 else
30680 return false;
30683 /* Target hook for floatn_mode. */
30684 static opt_scalar_float_mode
30685 rs6000_floatn_mode (int n, bool extended)
30687 if (extended)
30689 switch (n)
30691 case 32:
30692 return DFmode;
30694 case 64:
30695 if (TARGET_FLOAT128_TYPE)
30696 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30697 else
30698 return opt_scalar_float_mode ();
30700 case 128:
30701 return opt_scalar_float_mode ();
30703 default:
30704 /* Those are the only valid _FloatNx types. */
30705 gcc_unreachable ();
30708 else
30710 switch (n)
30712 case 32:
30713 return SFmode;
30715 case 64:
30716 return DFmode;
30718 case 128:
30719 if (TARGET_FLOAT128_TYPE)
30720 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30721 else
30722 return opt_scalar_float_mode ();
30724 default:
30725 return opt_scalar_float_mode ();
30731 /* Target hook for c_mode_for_suffix. */
30732 static machine_mode
30733 rs6000_c_mode_for_suffix (char suffix)
30735 if (TARGET_FLOAT128_TYPE)
30737 if (suffix == 'q' || suffix == 'Q')
30738 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30740 /* At the moment, we are not defining a suffix for IBM extended double.
30741 If/when the default for -mabi=ieeelongdouble is changed, and we want
30742 to support __ibm128 constants in legacy library code, we may need to
30743 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
30744 'q' as machine-dependent suffixes. The x86_64 port uses 'w' for
30745 __float80 constants. */
30748 return VOIDmode;
30751 /* Target hook for invalid_arg_for_unprototyped_fn. */
30752 static const char *
30753 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
30755 return (!rs6000_darwin64_abi
30756 && typelist == 0
30757 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
30758 && (funcdecl == NULL_TREE
30759 || (TREE_CODE (funcdecl) == FUNCTION_DECL
30760 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
30761 ? N_("AltiVec argument passed to unprototyped function")
30762 : NULL;
30765 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
30766 setup by using __stack_chk_fail_local hidden function instead of
30767 calling __stack_chk_fail directly. Otherwise it is better to call
30768 __stack_chk_fail directly. */
30770 static tree ATTRIBUTE_UNUSED
30771 rs6000_stack_protect_fail (void)
30773 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
30774 ? default_hidden_stack_protect_fail ()
30775 : default_external_stack_protect_fail ();
30778 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
30780 #if TARGET_ELF
30781 static unsigned HOST_WIDE_INT
30782 rs6000_asan_shadow_offset (void)
30784 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
30786 #endif
30788 /* Mask options that we want to support inside of attribute((target)) and
30789 #pragma GCC target operations. Note, we do not include things like
30790 64/32-bit, endianness, hard/soft floating point, etc. that would have
30791 different calling sequences. */
30793 struct rs6000_opt_mask {
30794 const char *name; /* option name */
30795 HOST_WIDE_INT mask; /* mask to set */
30796 bool invert; /* invert sense of mask */
30797 bool valid_target; /* option is a target option */
30800 static struct rs6000_opt_mask const rs6000_opt_masks[] =
30802 { "altivec", OPTION_MASK_ALTIVEC, false, true },
30803 { "cmpb", OPTION_MASK_CMPB, false, true },
30804 { "crypto", OPTION_MASK_CRYPTO, false, true },
30805 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
30806 { "dlmzb", OPTION_MASK_DLMZB, false, true },
30807 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
30808 false, true },
30809 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
30810 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
30811 { "fprnd", OPTION_MASK_FPRND, false, true },
30812 { "future", OPTION_MASK_FUTURE, false, true },
30813 { "hard-dfp", OPTION_MASK_DFP, false, true },
30814 { "htm", OPTION_MASK_HTM, false, true },
30815 { "isel", OPTION_MASK_ISEL, false, true },
30816 { "mfcrf", OPTION_MASK_MFCRF, false, true },
30817 { "mfpgpr", 0, false, true },
30818 { "modulo", OPTION_MASK_MODULO, false, true },
30819 { "mulhw", OPTION_MASK_MULHW, false, true },
30820 { "multiple", OPTION_MASK_MULTIPLE, false, true },
30821 { "pcrel", OPTION_MASK_PCREL, false, true },
30822 { "popcntb", OPTION_MASK_POPCNTB, false, true },
30823 { "popcntd", OPTION_MASK_POPCNTD, false, true },
30824 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
30825 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
30826 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
30827 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
30828 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
30829 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
30830 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
30831 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
30832 { "prefixed-addr", OPTION_MASK_PREFIXED_ADDR, false, true },
30833 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
30834 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
30835 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
30836 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
30837 { "string", 0, false, true },
30838 { "update", OPTION_MASK_NO_UPDATE, true , true },
30839 { "vsx", OPTION_MASK_VSX, false, true },
30840 #ifdef OPTION_MASK_64BIT
30841 #if TARGET_AIX_OS
30842 { "aix64", OPTION_MASK_64BIT, false, false },
30843 { "aix32", OPTION_MASK_64BIT, true, false },
30844 #else
30845 { "64", OPTION_MASK_64BIT, false, false },
30846 { "32", OPTION_MASK_64BIT, true, false },
30847 #endif
30848 #endif
30849 #ifdef OPTION_MASK_EABI
30850 { "eabi", OPTION_MASK_EABI, false, false },
30851 #endif
30852 #ifdef OPTION_MASK_LITTLE_ENDIAN
30853 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
30854 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
30855 #endif
30856 #ifdef OPTION_MASK_RELOCATABLE
30857 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
30858 #endif
30859 #ifdef OPTION_MASK_STRICT_ALIGN
30860 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
30861 #endif
30862 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
30863 { "string", 0, false, false },
30866 /* Builtin mask mapping for printing the flags. */
30867 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
30869 { "altivec", RS6000_BTM_ALTIVEC, false, false },
30870 { "vsx", RS6000_BTM_VSX, false, false },
30871 { "fre", RS6000_BTM_FRE, false, false },
30872 { "fres", RS6000_BTM_FRES, false, false },
30873 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
30874 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
30875 { "popcntd", RS6000_BTM_POPCNTD, false, false },
30876 { "cell", RS6000_BTM_CELL, false, false },
30877 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
30878 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
30879 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
30880 { "crypto", RS6000_BTM_CRYPTO, false, false },
30881 { "htm", RS6000_BTM_HTM, false, false },
30882 { "hard-dfp", RS6000_BTM_DFP, false, false },
30883 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
30884 { "long-double-128", RS6000_BTM_LDBL128, false, false },
30885 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
30886 { "float128", RS6000_BTM_FLOAT128, false, false },
30887 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
30890 /* Option variables that we want to support inside attribute((target)) and
30891 #pragma GCC target operations. */
30893 struct rs6000_opt_var {
30894 const char *name; /* option name */
30895 size_t global_offset; /* offset of the option in global_options. */
30896 size_t target_offset; /* offset of the option in target options. */
30899 static struct rs6000_opt_var const rs6000_opt_vars[] =
30901 { "friz",
30902 offsetof (struct gcc_options, x_TARGET_FRIZ),
30903 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
30904 { "avoid-indexed-addresses",
30905 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
30906 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
30907 { "longcall",
30908 offsetof (struct gcc_options, x_rs6000_default_long_calls),
30909 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
30910 { "optimize-swaps",
30911 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
30912 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
30913 { "allow-movmisalign",
30914 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
30915 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
30916 { "sched-groups",
30917 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
30918 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
30919 { "always-hint",
30920 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
30921 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
30922 { "align-branch-targets",
30923 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
30924 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
30925 { "tls-markers",
30926 offsetof (struct gcc_options, x_tls_markers),
30927 offsetof (struct cl_target_option, x_tls_markers), },
30928 { "sched-prolog",
30929 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30930 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30931 { "sched-epilog",
30932 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30933 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30934 { "speculate-indirect-jumps",
30935 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
30936 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
30939 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
30940 parsing. Return true if there were no errors. */
30942 static bool
30943 rs6000_inner_target_options (tree args, bool attr_p)
30945 bool ret = true;
30947 if (args == NULL_TREE)
30950 else if (TREE_CODE (args) == STRING_CST)
30952 char *p = ASTRDUP (TREE_STRING_POINTER (args));
30953 char *q;
30955 while ((q = strtok (p, ",")) != NULL)
30957 bool error_p = false;
30958 bool not_valid_p = false;
30959 const char *cpu_opt = NULL;
30961 p = NULL;
30962 if (strncmp (q, "cpu=", 4) == 0)
30964 int cpu_index = rs6000_cpu_name_lookup (q+4);
30965 if (cpu_index >= 0)
30966 rs6000_cpu_index = cpu_index;
30967 else
30969 error_p = true;
30970 cpu_opt = q+4;
30973 else if (strncmp (q, "tune=", 5) == 0)
30975 int tune_index = rs6000_cpu_name_lookup (q+5);
30976 if (tune_index >= 0)
30977 rs6000_tune_index = tune_index;
30978 else
30980 error_p = true;
30981 cpu_opt = q+5;
30984 else
30986 size_t i;
30987 bool invert = false;
30988 char *r = q;
30990 error_p = true;
30991 if (strncmp (r, "no-", 3) == 0)
30993 invert = true;
30994 r += 3;
30997 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
30998 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31000 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31002 if (!rs6000_opt_masks[i].valid_target)
31003 not_valid_p = true;
31004 else
31006 error_p = false;
31007 rs6000_isa_flags_explicit |= mask;
31009 /* VSX needs altivec, so -mvsx automagically sets
31010 altivec and disables -mavoid-indexed-addresses. */
31011 if (!invert)
31013 if (mask == OPTION_MASK_VSX)
31015 mask |= OPTION_MASK_ALTIVEC;
31016 TARGET_AVOID_XFORM = 0;
31020 if (rs6000_opt_masks[i].invert)
31021 invert = !invert;
31023 if (invert)
31024 rs6000_isa_flags &= ~mask;
31025 else
31026 rs6000_isa_flags |= mask;
31028 break;
31031 if (error_p && !not_valid_p)
31033 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31034 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31036 size_t j = rs6000_opt_vars[i].global_offset;
31037 *((int *) ((char *)&global_options + j)) = !invert;
31038 error_p = false;
31039 not_valid_p = false;
31040 break;
31045 if (error_p)
31047 const char *eprefix, *esuffix;
31049 ret = false;
31050 if (attr_p)
31052 eprefix = "__attribute__((__target__(";
31053 esuffix = ")))";
31055 else
31057 eprefix = "#pragma GCC target ";
31058 esuffix = "";
31061 if (cpu_opt)
31062 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
31063 q, esuffix);
31064 else if (not_valid_p)
31065 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
31066 else
31067 error ("%s%qs%s is invalid", eprefix, q, esuffix);
31072 else if (TREE_CODE (args) == TREE_LIST)
31076 tree value = TREE_VALUE (args);
31077 if (value)
31079 bool ret2 = rs6000_inner_target_options (value, attr_p);
31080 if (!ret2)
31081 ret = false;
31083 args = TREE_CHAIN (args);
31085 while (args != NULL_TREE);
31088 else
31090 error ("attribute %<target%> argument not a string");
31091 return false;
31094 return ret;
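/* Illustrative sketch, not part of rs6000.c: the tokenizing scheme the
   parser above uses, in isolation.  The first strtok call receives the
   string and later calls receive NULL so scanning resumes where it
   stopped; a "no-" prefix flips the sense of the option, mirroring the
   rs6000_opt_masks handling.  The apply callback is hypothetical.  */
#include <string.h>
#include <stdbool.h>

static void
example_parse_target_options (char *p,
			      void (*apply) (const char *, bool))
{
  char *q;
  while ((q = strtok (p, ",")) != NULL)
    {
      p = NULL;			/* keep scanning the same string */
      bool invert = false;
      if (strncmp (q, "no-", 3) == 0)
	{
	  invert = true;
	  q += 3;
	}
      apply (q, invert);
    }
}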
31097 /* Print out the target options as a list for -mdebug=target. */
31099 static void
31100 rs6000_debug_target_options (tree args, const char *prefix)
31102 if (args == NULL_TREE)
31103 fprintf (stderr, "%s<NULL>", prefix);
31105 else if (TREE_CODE (args) == STRING_CST)
31107 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31108 char *q;
31110 while ((q = strtok (p, ",")) != NULL)
31112 p = NULL;
31113 fprintf (stderr, "%s\"%s\"", prefix, q);
31114 prefix = ", ";
31118 else if (TREE_CODE (args) == TREE_LIST)
31122 tree value = TREE_VALUE (args);
31123 if (value)
31125 rs6000_debug_target_options (value, prefix);
31126 prefix = ", ";
31128 args = TREE_CHAIN (args);
31130 while (args != NULL_TREE);
31133 else
31134 gcc_unreachable ();
31136 return;
31140 /* Hook to validate attribute((target("..."))). */
31142 static bool
31143 rs6000_valid_attribute_p (tree fndecl,
31144 tree ARG_UNUSED (name),
31145 tree args,
31146 int flags)
31148 struct cl_target_option cur_target;
31149 bool ret;
31150 tree old_optimize;
31151 tree new_target, new_optimize;
31152 tree func_optimize;
31154 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31156 if (TARGET_DEBUG_TARGET)
31158 tree tname = DECL_NAME (fndecl);
31159 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31160 if (tname)
31161 fprintf (stderr, "function: %.*s\n",
31162 (int) IDENTIFIER_LENGTH (tname),
31163 IDENTIFIER_POINTER (tname));
31164 else
31165 fprintf (stderr, "function: unknown\n");
31167 fprintf (stderr, "args:");
31168 rs6000_debug_target_options (args, " ");
31169 fprintf (stderr, "\n");
31171 if (flags)
31172 fprintf (stderr, "flags: 0x%x\n", flags);
31174 fprintf (stderr, "--------------------\n");
31177 /* attribute((target("default"))) does nothing, beyond
31178 affecting multi-versioning. */
31179 if (TREE_VALUE (args)
31180 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
31181 && TREE_CHAIN (args) == NULL_TREE
31182 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
31183 return true;
31185 old_optimize = build_optimization_node (&global_options);
31186 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31188 /* If the function changed the optimization levels as well as setting target
31189 options, start with the optimizations specified. */
31190 if (func_optimize && func_optimize != old_optimize)
31191 cl_optimization_restore (&global_options,
31192 TREE_OPTIMIZATION (func_optimize));
31194 /* The target attributes may also change some optimization flags, so update
31195 the optimization options if necessary. */
31196 cl_target_option_save (&cur_target, &global_options);
31197 rs6000_cpu_index = rs6000_tune_index = -1;
31198 ret = rs6000_inner_target_options (args, true);
31200 /* Set up any additional state. */
31201 if (ret)
31203 ret = rs6000_option_override_internal (false);
31204 new_target = build_target_option_node (&global_options);
31206 else
31207 new_target = NULL;
31209 new_optimize = build_optimization_node (&global_options);
31211 if (!new_target)
31212 ret = false;
31214 else if (fndecl)
31216 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31218 if (old_optimize != new_optimize)
31219 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31222 cl_target_option_restore (&global_options, &cur_target);
31224 if (old_optimize != new_optimize)
31225 cl_optimization_restore (&global_options,
31226 TREE_OPTIMIZATION (old_optimize));
31228 return ret;
31232 /* Hook to validate the current #pragma GCC target and set the state, and
31233 update the macros based on what was changed. If ARGS is NULL, then
31234 POP_TARGET is used to reset the options. */
31236 bool
31237 rs6000_pragma_target_parse (tree args, tree pop_target)
31239 tree prev_tree = build_target_option_node (&global_options);
31240 tree cur_tree;
31241 struct cl_target_option *prev_opt, *cur_opt;
31242 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
31243 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
31245 if (TARGET_DEBUG_TARGET)
31247 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
31248 fprintf (stderr, "args:");
31249 rs6000_debug_target_options (args, " ");
31250 fprintf (stderr, "\n");
31252 if (pop_target)
31254 fprintf (stderr, "pop_target:\n");
31255 debug_tree (pop_target);
31257 else
31258 fprintf (stderr, "pop_target: <NULL>\n");
31260 fprintf (stderr, "--------------------\n");
31263 if (! args)
31265 cur_tree = ((pop_target)
31266 ? pop_target
31267 : target_option_default_node);
31268 cl_target_option_restore (&global_options,
31269 TREE_TARGET_OPTION (cur_tree));
31271 else
31273 rs6000_cpu_index = rs6000_tune_index = -1;
31274 if (!rs6000_inner_target_options (args, false)
31275 || !rs6000_option_override_internal (false)
31276 || (cur_tree = build_target_option_node (&global_options))
31277 == NULL_TREE)
31279 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
31280 fprintf (stderr, "invalid pragma\n");
31282 return false;
31286 target_option_current_node = cur_tree;
31287 rs6000_activate_target_options (target_option_current_node);
31289 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31290 change the macros that are defined. */
31291 if (rs6000_target_modify_macros_ptr)
31293 prev_opt = TREE_TARGET_OPTION (prev_tree);
31294 prev_bumask = prev_opt->x_rs6000_builtin_mask;
31295 prev_flags = prev_opt->x_rs6000_isa_flags;
31297 cur_opt = TREE_TARGET_OPTION (cur_tree);
31298 cur_flags = cur_opt->x_rs6000_isa_flags;
31299 cur_bumask = cur_opt->x_rs6000_builtin_mask;
31301 diff_bumask = (prev_bumask ^ cur_bumask);
31302 diff_flags = (prev_flags ^ cur_flags);
31304 if ((diff_flags != 0) || (diff_bumask != 0))
31306 /* Delete old macros. */
31307 rs6000_target_modify_macros_ptr (false,
31308 prev_flags & diff_flags,
31309 prev_bumask & diff_bumask);
31311 /* Define new macros. */
31312 rs6000_target_modify_macros_ptr (true,
31313 cur_flags & diff_flags,
31314 cur_bumask & diff_bumask);
31318 return true;
31322 /* Remember the last target of rs6000_set_current_function. */
31323 static GTY(()) tree rs6000_previous_fndecl;
31325 /* Restore target's globals from NEW_TREE and invalidate the
31326 rs6000_previous_fndecl cache. */
31328 void
31329 rs6000_activate_target_options (tree new_tree)
31331 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
31332 if (TREE_TARGET_GLOBALS (new_tree))
31333 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31334 else if (new_tree == target_option_default_node)
31335 restore_target_globals (&default_target_globals);
31336 else
31337 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
31338 rs6000_previous_fndecl = NULL_TREE;
31341 /* Establish appropriate back-end context for processing the function
31342 FNDECL. The argument might be NULL to indicate processing at top
31343 level, outside of any function scope. */
31344 static void
31345 rs6000_set_current_function (tree fndecl)
31347 if (TARGET_DEBUG_TARGET)
31349 fprintf (stderr, "\n==================== rs6000_set_current_function");
31351 if (fndecl)
31352 fprintf (stderr, ", fndecl %s (%p)",
31353 (DECL_NAME (fndecl)
31354 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
31355 : "<unknown>"), (void *)fndecl);
31357 if (rs6000_previous_fndecl)
31358 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
31360 fprintf (stderr, "\n");
31363 /* Only change the context if the function changes. This hook is called
31364 several times in the course of compiling a function, and we don't want to
31365 slow things down too much or call target_reinit when it isn't safe. */
31366 if (fndecl == rs6000_previous_fndecl)
31367 return;
31369 tree old_tree;
31370 if (rs6000_previous_fndecl == NULL_TREE)
31371 old_tree = target_option_current_node;
31372 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
31373 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
31374 else
31375 old_tree = target_option_default_node;
31377 tree new_tree;
31378 if (fndecl == NULL_TREE)
31380 if (old_tree != target_option_current_node)
31381 new_tree = target_option_current_node;
31382 else
31383 new_tree = NULL_TREE;
31385 else
31387 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31388 if (new_tree == NULL_TREE)
31389 new_tree = target_option_default_node;
31392 if (TARGET_DEBUG_TARGET)
31394 if (new_tree)
31396 fprintf (stderr, "\nnew fndecl target specific options:\n");
31397 debug_tree (new_tree);
31400 if (old_tree)
31402 fprintf (stderr, "\nold fndecl target specific options:\n");
31403 debug_tree (old_tree);
31406 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
31407 fprintf (stderr, "--------------------\n");
31410 if (new_tree && old_tree != new_tree)
31411 rs6000_activate_target_options (new_tree);
31413 if (fndecl)
31414 rs6000_previous_fndecl = fndecl;
31418 /* Save the current options */
31420 static void
31421 rs6000_function_specific_save (struct cl_target_option *ptr,
31422 struct gcc_options *opts)
31424 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
31425 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
31428 /* Restore the current options */
31430 static void
31431 rs6000_function_specific_restore (struct gcc_options *opts,
31432 struct cl_target_option *ptr)
31435 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
31436 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
31437 (void) rs6000_option_override_internal (false);
31440 /* Print the current options */
31442 static void
31443 rs6000_function_specific_print (FILE *file, int indent,
31444 struct cl_target_option *ptr)
31446 rs6000_print_isa_options (file, indent, "Isa options set",
31447 ptr->x_rs6000_isa_flags);
31449 rs6000_print_isa_options (file, indent, "Isa options explicit",
31450 ptr->x_rs6000_isa_flags_explicit);
31453 /* Helper function to print the current isa or misc options on a line. */
31455 static void
31456 rs6000_print_options_internal (FILE *file,
31457 int indent,
31458 const char *string,
31459 HOST_WIDE_INT flags,
31460 const char *prefix,
31461 const struct rs6000_opt_mask *opts,
31462 size_t num_elements)
31464 size_t i;
31465 size_t start_column = 0;
31466 size_t cur_column;
31467 size_t max_column = 120;
31468 size_t prefix_len = strlen (prefix);
31469 size_t comma_len = 0;
31470 const char *comma = "";
31472 if (indent)
31473 start_column += fprintf (file, "%*s", indent, "");
31475 if (!flags)
31477 fprintf (file, DEBUG_FMT_S, string, "<none>");
31478 return;
31481 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
31483 /* Print the various mask options. */
31484 cur_column = start_column;
31485 for (i = 0; i < num_elements; i++)
31487 bool invert = opts[i].invert;
31488 const char *name = opts[i].name;
31489 const char *no_str = "";
31490 HOST_WIDE_INT mask = opts[i].mask;
31491 size_t len = comma_len + prefix_len + strlen (name);
31493 if (!invert)
31495 if ((flags & mask) == 0)
31497 no_str = "no-";
31498 len += sizeof ("no-") - 1;
31501 flags &= ~mask;
31504 else
31506 if ((flags & mask) != 0)
31508 no_str = "no-";
31509 len += sizeof ("no-") - 1;
31512 flags |= mask;
31515 cur_column += len;
31516 if (cur_column > max_column)
31518 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
31519 cur_column = start_column + len;
31520 comma = "";
31523 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
31524 comma = ", ";
31525 comma_len = sizeof (", ") - 1;
31528 fputs ("\n", file);
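/* Output sketch (illustrative; the exact flag names and hex value depend
   on the target): a dump produced by the function above looks roughly like

     Isa options set = 0x... -maltivec, -mvsx, -mpower8-vector, \
         -mcrypto, ...

   with long lines wrapped at max_column and continued via a trailing
   backslash, mirroring command-line syntax.  */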
31531 /* Helper function to print the current isa options on a line. */
31533 static void
31534 rs6000_print_isa_options (FILE *file, int indent, const char *string,
31535 HOST_WIDE_INT flags)
31537 rs6000_print_options_internal (file, indent, string, flags, "-m",
31538 &rs6000_opt_masks[0],
31539 ARRAY_SIZE (rs6000_opt_masks));
31542 static void
31543 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
31544 HOST_WIDE_INT flags)
31546 rs6000_print_options_internal (file, indent, string, flags, "",
31547 &rs6000_builtin_mask_names[0],
31548 ARRAY_SIZE (rs6000_builtin_mask_names));
31551 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
31552 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
31553 -mupper-regs-df, etc.).
31555 If the user used -mno-power8-vector, we need to turn off all of the implicit
31556 ISA 2.07 and 3.0 options that relate to the vector unit.
31558 If the user used -mno-power9-vector, we need to turn off all of the implicit
31559 ISA 3.0 options that relate to the vector unit.
31561 This function does not handle explicit options such as the user specifying
31562 -mdirect-move. These are handled in rs6000_option_override_internal, and
31563 the appropriate error is given if needed.
31565 We return a mask of all of the implicit options that should not be enabled
31566 by default. */
31568 static HOST_WIDE_INT
31569 rs6000_disable_incompatible_switches (void)
31571 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
31572 size_t i, j;
31574 static const struct {
31575 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
31576 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
31577 const char *const name; /* name of the switch. */
31578 } flags[] = {
31579 { OPTION_MASK_FUTURE, OTHER_FUTURE_MASKS, "future" },
31580 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
31581 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
31582 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
31585 for (i = 0; i < ARRAY_SIZE (flags); i++)
31587 HOST_WIDE_INT no_flag = flags[i].no_flag;
31589 if ((rs6000_isa_flags & no_flag) == 0
31590 && (rs6000_isa_flags_explicit & no_flag) != 0)
31592 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
31593 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
31594 & rs6000_isa_flags
31595 & dep_flags);
31597 if (set_flags)
31599 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
31600 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
31602 set_flags &= ~rs6000_opt_masks[j].mask;
31603 error ("%<-mno-%s%> turns off %<-m%s%>",
31604 flags[i].name,
31605 rs6000_opt_masks[j].name);
31608 gcc_assert (!set_flags);
31611 rs6000_isa_flags &= ~dep_flags;
31612 ignore_masks |= no_flag | dep_flags;
31616 return ignore_masks;
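/* Example (illustrative): given -mno-vsx -mpower8-vector on the command
   line, OPTION_MASK_P8_VECTOR is an explicitly set flag inside
   OTHER_VSX_VECTOR_MASKS, so the loop above emits
     error: '-mno-vsx' turns off '-mpower8-vector'
   and the returned mask tells the caller not to re-enable any
   VSX-dependent flag by default.  */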
31620 /* Helper function for printing the function name when debugging. */
31622 static const char *
31623 get_decl_name (tree fn)
31625 tree name;
31627 if (!fn)
31628 return "<null>";
31630 name = DECL_NAME (fn);
31631 if (!name)
31632 return "<no-name>";
31634 return IDENTIFIER_POINTER (name);
31637 /* Return the clone id of the target we are compiling code for in a target
31638 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
31639 the priority list for the target clones (ordered from lowest to
31640 highest). */
31642 static int
31643 rs6000_clone_priority (tree fndecl)
31645 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31646 HOST_WIDE_INT isa_masks;
31647 int ret = CLONE_DEFAULT;
31648 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
31649 const char *attrs_str = NULL;
31651 attrs = TREE_VALUE (TREE_VALUE (attrs));
31652 attrs_str = TREE_STRING_POINTER (attrs);
31654 /* Return priority zero for default function. Return the ISA needed for the
31655 function if it is not the default. */
31656 if (strcmp (attrs_str, "default") != 0)
31658 if (fn_opts == NULL_TREE)
31659 fn_opts = target_option_default_node;
31661 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
31662 isa_masks = rs6000_isa_flags;
31663 else
31664 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
31666 for (ret = CLONE_MAX - 1; ret != 0; ret--)
31667 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
31668 break;
31671 if (TARGET_DEBUG_TARGET)
31672 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
31673 get_decl_name (fndecl), ret);
31675 return ret;
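/* Usage sketch (hypothetical user code, not part of this file): the
   priorities computed above drive multi-versioned functions such as

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod3 (long a, long b) { return (a + b) % 3; }

   where the "default" clone gets CLONE_DEFAULT (0) and the power9 clone
   gets the highest matching entry in rs6000_clone_map.  */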
31678 /* This compares the priority of target features in function DECL1 and DECL2.
31679 It returns a positive value if DECL1 has higher priority, a negative value if
31680 DECL2 has higher priority, and 0 if they are the same. Note that priorities are
31681 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
31683 static int
31684 rs6000_compare_version_priority (tree decl1, tree decl2)
31686 int priority1 = rs6000_clone_priority (decl1);
31687 int priority2 = rs6000_clone_priority (decl2);
31688 int ret = priority1 - priority2;
31690 if (TARGET_DEBUG_TARGET)
31691 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
31692 get_decl_name (decl1), get_decl_name (decl2), ret);
31694 return ret;
31697 /* Make a dispatcher declaration for the multi-versioned function DECL.
31698 Calls to DECL function will be replaced with calls to the dispatcher
31699 by the front-end. Returns the decl of the dispatcher function. */
31701 static tree
31702 rs6000_get_function_versions_dispatcher (void *decl)
31704 tree fn = (tree) decl;
31705 struct cgraph_node *node = NULL;
31706 struct cgraph_node *default_node = NULL;
31707 struct cgraph_function_version_info *node_v = NULL;
31708 struct cgraph_function_version_info *first_v = NULL;
31710 tree dispatch_decl = NULL;
31712 struct cgraph_function_version_info *default_version_info = NULL;
31713 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
31715 if (TARGET_DEBUG_TARGET)
31716 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
31717 get_decl_name (fn));
31719 node = cgraph_node::get (fn);
31720 gcc_assert (node != NULL);
31722 node_v = node->function_version ();
31723 gcc_assert (node_v != NULL);
31725 if (node_v->dispatcher_resolver != NULL)
31726 return node_v->dispatcher_resolver;
31728 /* Find the default version and make it the first node. */
31729 first_v = node_v;
31730 /* Go to the beginning of the chain. */
31731 while (first_v->prev != NULL)
31732 first_v = first_v->prev;
31734 default_version_info = first_v;
31735 while (default_version_info != NULL)
31737 const tree decl2 = default_version_info->this_node->decl;
31738 if (is_function_default_version (decl2))
31739 break;
31740 default_version_info = default_version_info->next;
31743 /* If there is no default node, just return NULL. */
31744 if (default_version_info == NULL)
31745 return NULL;
31747 /* Make default info the first node. */
31748 if (first_v != default_version_info)
31750 default_version_info->prev->next = default_version_info->next;
31751 if (default_version_info->next)
31752 default_version_info->next->prev = default_version_info->prev;
31753 first_v->prev = default_version_info;
31754 default_version_info->next = first_v;
31755 default_version_info->prev = NULL;
31758 default_node = default_version_info->this_node;
31760 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
31761 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31762 "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
31763 "exports hardware capability bits");
31764 #else
31766 if (targetm.has_ifunc_p ())
31768 struct cgraph_function_version_info *it_v = NULL;
31769 struct cgraph_node *dispatcher_node = NULL;
31770 struct cgraph_function_version_info *dispatcher_version_info = NULL;
31772 /* Right now, the dispatching is done via ifunc. */
31773 dispatch_decl = make_dispatcher_decl (default_node->decl);
31775 dispatcher_node = cgraph_node::get_create (dispatch_decl);
31776 gcc_assert (dispatcher_node != NULL);
31777 dispatcher_node->dispatcher_function = 1;
31778 dispatcher_version_info
31779 = dispatcher_node->insert_new_function_version ();
31780 dispatcher_version_info->next = default_version_info;
31781 dispatcher_node->definition = 1;
31783 /* Set the dispatcher for all the versions. */
31784 it_v = default_version_info;
31785 while (it_v != NULL)
31787 it_v->dispatcher_resolver = dispatch_decl;
31788 it_v = it_v->next;
31791 else
31793 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31794 "multiversioning needs ifunc which is not supported "
31795 "on this target");
31797 #endif
31799 return dispatch_decl;
31802 /* Make the resolver function decl to dispatch the versions of a multi-
31803 versioned function, DEFAULT_DECL. Create an empty basic block in the
31804 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
31805 function. */
31807 static tree
31808 make_resolver_func (const tree default_decl,
31809 const tree dispatch_decl,
31810 basic_block *empty_bb)
31812 /* Make the resolver function static. The resolver function returns
31813 void *. */
31814 tree decl_name = clone_function_name (default_decl, "resolver");
31815 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
31816 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
31817 tree decl = build_fn_decl (resolver_name, type);
31818 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
31820 DECL_NAME (decl) = decl_name;
31821 TREE_USED (decl) = 1;
31822 DECL_ARTIFICIAL (decl) = 1;
31823 DECL_IGNORED_P (decl) = 0;
31824 TREE_PUBLIC (decl) = 0;
31825 DECL_UNINLINABLE (decl) = 1;
31827 /* Resolver is not external, body is generated. */
31828 DECL_EXTERNAL (decl) = 0;
31829 DECL_EXTERNAL (dispatch_decl) = 0;
31831 DECL_CONTEXT (decl) = NULL_TREE;
31832 DECL_INITIAL (decl) = make_node (BLOCK);
31833 DECL_STATIC_CONSTRUCTOR (decl) = 0;
31835 /* Build result decl and add to function_decl. */
31836 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
31837 DECL_CONTEXT (t) = decl;
31838 DECL_ARTIFICIAL (t) = 1;
31839 DECL_IGNORED_P (t) = 1;
31840 DECL_RESULT (decl) = t;
31842 gimplify_function_tree (decl);
31843 push_cfun (DECL_STRUCT_FUNCTION (decl));
31844 *empty_bb = init_lowered_empty_function (decl, false,
31845 profile_count::uninitialized ());
31847 cgraph_node::add_new_function (decl, true);
31848 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
31850 pop_cfun ();
31852 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
31853 DECL_ATTRIBUTES (dispatch_decl)
31854 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
31856 cgraph_node::create_same_body_alias (dispatch_decl, decl);
31858 return decl;
31861 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
31862 return a pointer to VERSION_DECL if we are running on a machine that
31863 supports the index CLONE_ISA hardware architecture bits. This function will
31864 be called during version dispatch to decide which function version to
31865 execute. It returns the basic block at the end, to which more conditions
31866 can be added. */
31868 static basic_block
31869 add_condition_to_bb (tree function_decl, tree version_decl,
31870 int clone_isa, basic_block new_bb)
31872 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
31874 gcc_assert (new_bb != NULL);
31875 gimple_seq gseq = bb_seq (new_bb);
31878 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
31879 build_fold_addr_expr (version_decl));
31880 tree result_var = create_tmp_var (ptr_type_node);
31881 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
31882 gimple *return_stmt = gimple_build_return (result_var);
31884 if (clone_isa == CLONE_DEFAULT)
31886 gimple_seq_add_stmt (&gseq, convert_stmt);
31887 gimple_seq_add_stmt (&gseq, return_stmt);
31888 set_bb_seq (new_bb, gseq);
31889 gimple_set_bb (convert_stmt, new_bb);
31890 gimple_set_bb (return_stmt, new_bb);
31891 pop_cfun ();
31892 return new_bb;
31895 tree bool_zero = build_int_cst (bool_int_type_node, 0);
31896 tree cond_var = create_tmp_var (bool_int_type_node);
31897 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
31898 const char *arg_str = rs6000_clone_map[clone_isa].name;
31899 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
31900 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
31901 gimple_call_set_lhs (call_cond_stmt, cond_var);
31903 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
31904 gimple_set_bb (call_cond_stmt, new_bb);
31905 gimple_seq_add_stmt (&gseq, call_cond_stmt);
31907 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
31908 NULL_TREE, NULL_TREE);
31909 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
31910 gimple_set_bb (if_else_stmt, new_bb);
31911 gimple_seq_add_stmt (&gseq, if_else_stmt);
31913 gimple_seq_add_stmt (&gseq, convert_stmt);
31914 gimple_seq_add_stmt (&gseq, return_stmt);
31915 set_bb_seq (new_bb, gseq);
31917 basic_block bb1 = new_bb;
31918 edge e12 = split_block (bb1, if_else_stmt);
31919 basic_block bb2 = e12->dest;
31920 e12->flags &= ~EDGE_FALLTHRU;
31921 e12->flags |= EDGE_TRUE_VALUE;
31923 edge e23 = split_block (bb2, return_stmt);
31924 gimple_set_bb (convert_stmt, bb2);
31925 gimple_set_bb (return_stmt, bb2);
31927 basic_block bb3 = e23->dest;
31928 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
31930 remove_edge (e23);
31931 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
31933 pop_cfun ();
31934 return bb3;
31937 /* This function generates the dispatch function for multi-versioned functions.
31938 DISPATCH_DECL is the function which will contain the dispatch logic.
31939 FNDECLS are the function choices for dispatch, and is a tree chain.
31940 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
31941 code is generated. */
31943 static int
31944 dispatch_function_versions (tree dispatch_decl,
31945 void *fndecls_p,
31946 basic_block *empty_bb)
31948 int ix;
31949 tree ele;
31950 vec<tree> *fndecls;
31951 tree clones[CLONE_MAX];
31953 if (TARGET_DEBUG_TARGET)
31954 fputs ("dispatch_function_versions, top\n", stderr);
31956 gcc_assert (dispatch_decl != NULL
31957 && fndecls_p != NULL
31958 && empty_bb != NULL);
31960 /* fndecls_p is actually a vector. */
31961 fndecls = static_cast<vec<tree> *> (fndecls_p);
31963 /* At least one more version other than the default. */
31964 gcc_assert (fndecls->length () >= 2);
31966 /* The first version in the vector is the default decl. */
31967 memset ((void *) clones, '\0', sizeof (clones));
31968 clones[CLONE_DEFAULT] = (*fndecls)[0];
31970 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
31971 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
31972 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
31973 recent glibc. If we ever need to call __builtin_cpu_init, we would need
31974 to insert the code here to do the call. */
31976 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
31978 int priority = rs6000_clone_priority (ele);
31979 if (!clones[priority])
31980 clones[priority] = ele;
31983 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
31984 if (clones[ix])
31986 if (TARGET_DEBUG_TARGET)
31987 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
31988 ix, get_decl_name (clones[ix]));
31990 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
31991 *empty_bb);
31994 return 0;
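/* The generated resolver therefore behaves roughly like the following
   pseudo-C (illustrative; the clone names come from rs6000_clone_map):

     void *resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00")) return f_power9;
       if (__builtin_cpu_supports ("arch_2_07")) return f_power8;
       return f_default;
     }

   with one condition block added per non-null clone, highest priority
   first, via add_condition_to_bb above.  */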
31997 /* Generate the dispatching code body to dispatch multi-versioned function
31998 DECL. The target hook is called to process the "target" attributes and
31999 provide the code to dispatch the right function at run-time. NODE points
32000 to the dispatcher decl whose body will be created. */
32002 static tree
32003 rs6000_generate_version_dispatcher_body (void *node_p)
32005 tree resolver;
32006 basic_block empty_bb;
32007 struct cgraph_node *node = (cgraph_node *) node_p;
32008 struct cgraph_function_version_info *ninfo = node->function_version ();
32010 if (ninfo->dispatcher_resolver)
32011 return ninfo->dispatcher_resolver;
32013 /* node is going to be an alias, so remove the finalized bit. */
32014 node->definition = false;
32016 /* The first version in the chain corresponds to the default version. */
32017 ninfo->dispatcher_resolver = resolver
32018 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
32020 if (TARGET_DEBUG_TARGET)
32021 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
32022 get_decl_name (resolver));
32024 push_cfun (DECL_STRUCT_FUNCTION (resolver));
32025 auto_vec<tree, 2> fn_ver_vec;
32027 for (struct cgraph_function_version_info *vinfo = ninfo->next;
32028 vinfo;
32029 vinfo = vinfo->next)
32031 struct cgraph_node *version = vinfo->this_node;
32032 /* Check for virtual functions here again, as by this time it should
32033 have been determined if this function needs a vtable index or
32034 not. This happens for methods in derived classes that override
32035 virtual methods in base classes but are not explicitly marked as
32036 virtual. */
32037 if (DECL_VINDEX (version->decl))
32038 sorry ("Virtual function multiversioning not supported");
32040 fn_ver_vec.safe_push (version->decl);
32043 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
32044 cgraph_edge::rebuild_edges ();
32045 pop_cfun ();
32046 return resolver;
32050 /* Hook to determine if one function can safely inline another. */
32052 static bool
32053 rs6000_can_inline_p (tree caller, tree callee)
32055 bool ret = false;
32056 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32057 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32059 /* If callee has no option attributes, then it is ok to inline. */
32060 if (!callee_tree)
32061 ret = true;
32063 /* If caller has no option attributes, but callee does then it is not ok to
32064 inline. */
32065 else if (!caller_tree)
32066 ret = false;
32068 else
32070 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32071 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32073 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32074 can inline an altivec function but a non-vsx function can't inline a
32075 vsx function. */
32076 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32077 == callee_opts->x_rs6000_isa_flags)
32078 ret = true;
32081 if (TARGET_DEBUG_TARGET)
32082 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
32083 get_decl_name (caller), get_decl_name (callee),
32084 (ret ? "can" : "cannot"));
32086 return ret;
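/* Example (illustrative): a caller compiled with -mcpu=power9 has the
   Altivec and VSX bits (among others) in its ISA flags, so it may inline
   a callee marked __attribute__ ((target ("altivec"))); the reverse
   direction fails the subset test because the callee's VSX bit is not
   present in the caller's flags.  */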
32089 /* Allocate a stack temp and fix up the address so it meets the particular
32090 memory requirements (either offsettable or REG+REG addressing). */
32092 rtx
32093 rs6000_allocate_stack_temp (machine_mode mode,
32094 bool offsettable_p,
32095 bool reg_reg_p)
32097 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32098 rtx addr = XEXP (stack, 0);
32099 int strict_p = reload_completed;
32101 if (!legitimate_indirect_address_p (addr, strict_p))
32103 if (offsettable_p
32104 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32105 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32107 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32108 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32111 return stack;
32114 /* Given a memory reference, if it is not a reg or reg+reg addressing,
32115 convert to such a form to deal with memory reference instructions
32116 like STFIWX and LDBRX that only take reg+reg addressing. */
32118 rtx
32119 rs6000_force_indexed_or_indirect_mem (rtx x)
32121 machine_mode mode = GET_MODE (x);
32123 gcc_assert (MEM_P (x));
32124 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
32126 rtx addr = XEXP (x, 0);
32127 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32129 rtx reg = XEXP (addr, 0);
32130 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32131 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32132 gcc_assert (REG_P (reg));
32133 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32134 addr = reg;
32136 else if (GET_CODE (addr) == PRE_MODIFY)
32138 rtx reg = XEXP (addr, 0);
32139 rtx expr = XEXP (addr, 1);
32140 gcc_assert (REG_P (reg));
32141 gcc_assert (GET_CODE (expr) == PLUS);
32142 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32143 addr = reg;
32146 if (GET_CODE (addr) == PLUS)
32148 rtx op0 = XEXP (addr, 0);
32149 rtx op1 = XEXP (addr, 1);
32150 op0 = force_reg (Pmode, op0);
32151 op1 = force_reg (Pmode, op1);
32152 x = replace_equiv_address (x, gen_rtx_PLUS (Pmode, op0, op1));
32154 else
32155 x = replace_equiv_address (x, force_reg (Pmode, addr));
32158 return x;
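/* Example (illustrative): for a D-form reference such as
     (mem:DI (plus:DI (reg:DI 3) (const_int 16)))
   both the base and the offset are forced into registers, producing
     (mem:DI (plus:DI (reg:DI 3) (reg:DI <tmp>)))
   which an X-form instruction like LDBRX can match.  */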
32161 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32163 On the RS/6000, all integer constants are acceptable, most won't be valid
32164 for particular insns, though. Only easy FP constants are acceptable. */
32166 static bool
32167 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
32169 if (TARGET_ELF && tls_referenced_p (x))
32170 return false;
32172 if (CONST_DOUBLE_P (x))
32173 return easy_fp_constant (x, mode);
32175 if (GET_CODE (x) == CONST_VECTOR)
32176 return easy_vector_constant (x, mode);
32178 return true;
32182 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
32184 static bool
32185 chain_already_loaded (rtx_insn *last)
32187 for (; last != NULL; last = PREV_INSN (last))
32189 if (NONJUMP_INSN_P (last))
32191 rtx patt = PATTERN (last);
32193 if (GET_CODE (patt) == SET)
32195 rtx lhs = XEXP (patt, 0);
32197 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
32198 return true;
32202 return false;
32205 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32207 void
32208 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32210 rtx func = func_desc;
32211 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32212 rtx toc_load = NULL_RTX;
32213 rtx toc_restore = NULL_RTX;
32214 rtx func_addr;
32215 rtx abi_reg = NULL_RTX;
32216 rtx call[4];
32217 int n_call;
32218 rtx insn;
32219 bool is_pltseq_longcall;
32221 if (global_tlsarg)
32222 tlsarg = global_tlsarg;
32224 /* Handle longcall attributes. */
32225 is_pltseq_longcall = false;
32226 if ((INTVAL (cookie) & CALL_LONG) != 0
32227 && GET_CODE (func_desc) == SYMBOL_REF)
32229 func = rs6000_longcall_ref (func_desc, tlsarg);
32230 if (TARGET_PLTSEQ)
32231 is_pltseq_longcall = true;
32234 /* Handle indirect calls. */
32235 if (!SYMBOL_REF_P (func)
32236 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
32238 if (!rs6000_pcrel_p (cfun))
32240 /* Save the TOC into its reserved slot before the call,
32241 and prepare to restore it after the call. */
32242 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32243 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
32244 gen_rtvec (1, stack_toc_offset),
32245 UNSPEC_TOCSLOT);
32246 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
32248 /* Can we optimize saving the TOC in the prologue or
32249 do we need to do it at every call? */
32250 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32251 cfun->machine->save_toc_in_prologue = true;
32252 else
32254 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32255 rtx stack_toc_mem = gen_frame_mem (Pmode,
32256 gen_rtx_PLUS (Pmode, stack_ptr,
32257 stack_toc_offset));
32258 MEM_VOLATILE_P (stack_toc_mem) = 1;
32259 if (is_pltseq_longcall)
32261 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
32262 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32263 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
32265 else
32266 emit_move_insn (stack_toc_mem, toc_reg);
32270 if (DEFAULT_ABI == ABI_ELFv2)
32272 /* A function pointer in the ELFv2 ABI is just a plain address, but
32273 the ABI requires it to be loaded into r12 before the call. */
32274 func_addr = gen_rtx_REG (Pmode, 12);
32275 if (!rtx_equal_p (func_addr, func))
32276 emit_move_insn (func_addr, func);
32277 abi_reg = func_addr;
32278 /* Indirect calls via CTR are strongly preferred over indirect
32279 calls via LR, so move the address there. Needed to mark
32280 this insn for linker plt sequence editing too. */
32281 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32282 if (is_pltseq_longcall)
32284 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
32285 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32286 emit_insn (gen_rtx_SET (func_addr, mark_func));
32287 v = gen_rtvec (2, func_addr, func_desc);
32288 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32290 else
32291 emit_move_insn (func_addr, abi_reg);
32293 else
32295 /* A function pointer under AIX is a pointer to a data area whose
32296 first word contains the actual address of the function, whose
32297 second word contains a pointer to its TOC, and whose third word
32298 contains a value to place in the static chain register (r11).
32299 Note that if we load the static chain, our "trampoline" need
32300 not have any executable code. */
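/* Layout sketch of the descriptor just described (illustrative only;
   the compiler never declares such a type):
     struct aix_func_desc
     {
       void *code;          -- entry point of the function
       void *toc;           -- TOC base for the callee
       void *static_chain;  -- loaded into r11 when present
     };  */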
32302 /* Load up address of the actual function. */
32303 func = force_reg (Pmode, func);
32304 func_addr = gen_reg_rtx (Pmode);
32305 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
32307 /* Indirect calls via CTR are strongly preferred over indirect
32308 calls via LR, so move the address there. */
32309 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
32310 emit_move_insn (ctr_reg, func_addr);
32311 func_addr = ctr_reg;
32313 /* Prepare to load the TOC of the called function. Note that the
32314 TOC load must happen immediately before the actual call so
32315 that unwinding the TOC registers works correctly. See the
32316 comment in frob_update_context. */
32317 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32318 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32319 gen_rtx_PLUS (Pmode, func,
32320 func_toc_offset));
32321 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32323 /* If we have a static chain, load it up. But, if the call was
32324 originally direct, the 3rd word has not been written since no
32325 trampoline has been built, so we ought not to load it, lest we
32326 overwrite a static chain value. */
32327 if (!(GET_CODE (func_desc) == SYMBOL_REF
32328 && SYMBOL_REF_FUNCTION_P (func_desc))
32329 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
32330 && !chain_already_loaded (get_current_sequence ()->next->last))
32332 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32333 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32334 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32335 gen_rtx_PLUS (Pmode, func,
32336 func_sc_offset));
32337 emit_move_insn (sc_reg, func_sc_mem);
32338 abi_reg = sc_reg;
32342 else
32344 /* No TOC register needed for calls from PC-relative callers. */
32345 if (!rs6000_pcrel_p (cfun))
32346 /* Direct calls use the TOC: for local calls, the callee will
32347 assume the TOC register is set; for non-local calls, the
32348 PLT stub needs the TOC register. */
32349 abi_reg = toc_reg;
32350 func_addr = func;
32353 /* Create the call. */
32354 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32355 if (value != NULL_RTX)
32356 call[0] = gen_rtx_SET (value, call[0]);
32357 n_call = 1;
32359 if (toc_load)
32360 call[n_call++] = toc_load;
32361 if (toc_restore)
32362 call[n_call++] = toc_restore;
32364 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32366 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32367 insn = emit_call_insn (insn);
32369 /* Mention all registers defined by the ABI to hold information
32370 as uses in CALL_INSN_FUNCTION_USAGE. */
32371 if (abi_reg)
32372 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32375 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32377 void
32378 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32380 rtx call[2];
32381 rtx insn;
32383 gcc_assert (INTVAL (cookie) == 0);
32385 if (global_tlsarg)
32386 tlsarg = global_tlsarg;
32388 /* Create the call. */
32389 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
32390 if (value != NULL_RTX)
32391 call[0] = gen_rtx_SET (value, call[0]);
32393 call[1] = simple_return_rtx;
32395 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32396 insn = emit_call_insn (insn);
32398 /* Note use of the TOC register. */
32399 if (!rs6000_pcrel_p (cfun))
32400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
32401 gen_rtx_REG (Pmode, TOC_REGNUM));
32404 /* Expand code to perform a call under the SYSV4 ABI. */
32406 void
32407 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32409 rtx func = func_desc;
32410 rtx func_addr;
32411 rtx call[4];
32412 rtx insn;
32413 rtx abi_reg = NULL_RTX;
32414 int n;
32416 if (global_tlsarg)
32417 tlsarg = global_tlsarg;
32419 /* Handle longcall attributes. */
32420 if ((INTVAL (cookie) & CALL_LONG) != 0
32421 && GET_CODE (func_desc) == SYMBOL_REF)
32423 func = rs6000_longcall_ref (func_desc, tlsarg);
32424 /* If the longcall was implemented as an inline PLT call using
32425 PLT unspecs then func will be REG:r11. If not, func will be
32426 a pseudo reg. The inline PLT call sequence supports lazy
32427 linking (and longcalls to functions in dlopen'd libraries).
32428 The other style of longcalls doesn't. The lazy linking entry
32429 to the dynamic symbol resolver requires r11 be the function
32430 address (as it is for linker generated PLT stubs). Ensure
32431 r11 stays valid to the bctrl by marking r11 used by the call. */
32432 if (TARGET_PLTSEQ)
32433 abi_reg = func;
32436 /* Handle indirect calls. */
32437 if (GET_CODE (func) != SYMBOL_REF)
32439 func = force_reg (Pmode, func);
32441 /* Indirect calls via CTR are strongly preferred over indirect
32442 calls via LR, so move the address there. That can't be left
32443 to reload because we want to mark every instruction in an
32444 inline PLT call sequence with a reloc, enabling the linker to
32445 edit the sequence back to a direct call when that makes sense. */
32446 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32447 if (abi_reg)
32449 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32450 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32451 emit_insn (gen_rtx_SET (func_addr, mark_func));
32452 v = gen_rtvec (2, func_addr, func_desc);
32453 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32455 else
32456 emit_move_insn (func_addr, func);
32458 else
32459 func_addr = func;
32461 /* Create the call. */
32462 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32463 if (value != NULL_RTX)
32464 call[0] = gen_rtx_SET (value, call[0]);
32466 call[1] = gen_rtx_USE (VOIDmode, cookie);
32467 n = 2;
32468 if (TARGET_SECURE_PLT
32469 && flag_pic
32470 && GET_CODE (func_addr) == SYMBOL_REF
32471 && !SYMBOL_REF_LOCAL_P (func_addr))
32472 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
32474 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32476 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
32477 insn = emit_call_insn (insn);
32478 if (abi_reg)
32479 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32482 /* Expand code to perform a sibling call under the SysV4 ABI. */
32484 void
32485 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32487 rtx func = func_desc;
32488 rtx func_addr;
32489 rtx call[3];
32490 rtx insn;
32491 rtx abi_reg = NULL_RTX;
32493 if (global_tlsarg)
32494 tlsarg = global_tlsarg;
32496 /* Handle longcall attributes. */
32497 if ((INTVAL (cookie) & CALL_LONG) != 0
32498 && GET_CODE (func_desc) == SYMBOL_REF)
32500 func = rs6000_longcall_ref (func_desc, tlsarg);
32501 /* If the longcall was implemented as an inline PLT call using
32502 PLT unspecs then func will be REG:r11. If not, func will be
32503 a pseudo reg. The inline PLT call sequence supports lazy
32504 linking (and longcalls to functions in dlopen'd libraries).
32505 The other style of longcalls doesn't. The lazy linking entry
32506 to the dynamic symbol resolver requires r11 be the function
32507 address (as it is for linker generated PLT stubs). Ensure
32508 r11 stays valid to the bctr by marking r11 used by the call. */
32509 if (TARGET_PLTSEQ)
32510 abi_reg = func;
32513 /* Handle indirect calls. */
32514 if (GET_CODE (func) != SYMBOL_REF)
32516 func = force_reg (Pmode, func);
32518 /* Indirect sibcalls must go via CTR. That can't be left to
32519 reload because we want to mark every instruction in an inline
32520 PLT call sequence with a reloc, enabling the linker to edit
32521 the sequence back to a direct call when that makes sense. */
32522 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32523 if (abi_reg)
32525 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32526 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32527 emit_insn (gen_rtx_SET (func_addr, mark_func));
32528 v = gen_rtvec (2, func_addr, func_desc);
32529 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32531 else
32532 emit_move_insn (func_addr, func);
32534 else
32535 func_addr = func;
32537 /* Create the call. */
32538 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32539 if (value != NULL_RTX)
32540 call[0] = gen_rtx_SET (value, call[0]);
32542 call[1] = gen_rtx_USE (VOIDmode, cookie);
32543 call[2] = simple_return_rtx;
32545 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32546 insn = emit_call_insn (insn);
32547 if (abi_reg)
32548 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32551 #if TARGET_MACHO
32553 /* Expand code to perform a call under the Darwin ABI.
32554 Modulo handling of mlongcall, this is much the same as sysv.
32555 If/when the longcall optimisation is removed, we could drop this
32556 code and use the sysv case (taking care to avoid the tls stuff).
32558 We can use this for sibcalls too, if needed. */
32560 void
32561 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
32562 rtx cookie, bool sibcall)
32564 rtx func = func_desc;
32565 rtx func_addr;
32566 rtx call[3];
32567 rtx insn;
32568 int cookie_val = INTVAL (cookie);
32569 bool make_island = false;
32571 /* Handle longcall attributes; there are two cases for Darwin:
32572 1) Newer linkers are capable of synthesising any branch islands needed.
32573 2) We need a helper branch island synthesised by the compiler.
32574 The second case has mostly been retired and we don't use it for m64.
32575 In fact, it is only an optimisation; we could just indirect as sysv does,
32576 but we keep it for backwards compatibility for now.
32577 If we're going to use this, then we need to keep the CALL_LONG bit set,
32578 so that we can pick up the special insn form later. */
32579 if ((cookie_val & CALL_LONG) != 0
32580 && GET_CODE (func_desc) == SYMBOL_REF)
32582 /* FIXME: the longcall opt should not hang off picsymbol stubs. */
32583 if (darwin_picsymbol_stubs && TARGET_32BIT)
32584 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
32585 else
32587 /* The linker is capable of doing this, but the user explicitly
32588 asked for -mlongcall, so we'll do the 'normal' version. */
32589 func = rs6000_longcall_ref (func_desc, NULL_RTX);
32590 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
32594 /* Handle indirect calls. */
32595 if (GET_CODE (func) != SYMBOL_REF)
32597 func = force_reg (Pmode, func);
32599 /* Indirect calls via CTR are strongly preferred over indirect
32600 calls via LR, and are required for indirect sibcalls, so move
32601 the address there. */
32602 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32603 emit_move_insn (func_addr, func);
32605 else
32606 func_addr = func;
32608 /* Create the call. */
32609 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32610 if (value != NULL_RTX)
32611 call[0] = gen_rtx_SET (value, call[0]);
32613 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
32615 if (sibcall)
32616 call[2] = simple_return_rtx;
32617 else
32618 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32620 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32621 insn = emit_call_insn (insn);
32622 /* Now that we have the debug info in the insn, we can set up the branch island
32623 if we're using one. */
32624 if (make_island)
32626 tree funname = get_identifier (XSTR (func_desc, 0));
32628 if (no_previous_def (funname))
32630 rtx label_rtx = gen_label_rtx ();
32631 char *label_buf, temp_buf[256];
32632 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32633 CODE_LABEL_NUMBER (label_rtx));
32634 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32635 tree labelname = get_identifier (label_buf);
32636 add_compiler_branch_island (labelname, funname,
32637 insn_line ((const rtx_insn*)insn));
32641 #endif
32643 void
32644 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32645 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32647 #if TARGET_MACHO
32648 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
32649 #else
32650 gcc_unreachable ();
32651 #endif
32655 void
32656 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32657 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32659 #if TARGET_MACHO
32660 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
32661 #else
32662 gcc_unreachable ();
32663 #endif
32666 /* Return whether we should generate PC-relative code for FNDECL. */
32667 bool
32668 rs6000_fndecl_pcrel_p (const_tree fndecl)
32670 if (DEFAULT_ABI != ABI_ELFv2)
32671 return false;
32673 struct cl_target_option *opts = target_opts_for_fn (fndecl);
32675 return ((opts->x_rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32676 && TARGET_CMODEL == CMODEL_MEDIUM);
32679 /* Return whether we should generate PC-relative code for *FN. */
32680 bool
32681 rs6000_pcrel_p (struct function *fn)
32683 if (DEFAULT_ABI != ABI_ELFv2)
32684 return false;
32686 /* Optimize usual case. */
32687 if (fn == cfun)
32688 return ((rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32689 && TARGET_CMODEL == CMODEL_MEDIUM);
32691 return rs6000_fndecl_pcrel_p (fn->decl);
32694 #ifdef HAVE_GAS_HIDDEN
32695 # define USE_HIDDEN_LINKONCE 1
32696 #else
32697 # define USE_HIDDEN_LINKONCE 0
32698 #endif
32700 /* Fills in the label name that should be used for a 476 link stack thunk. */
32702 void
32703 get_ppc476_thunk_name (char name[32])
32705 gcc_assert (TARGET_LINK_STACK);
32707 if (USE_HIDDEN_LINKONCE)
32708 sprintf (name, "__ppc476.get_thunk");
32709 else
32710 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32713 /* This function emits the simple thunk routine that is used to preserve
32714 the link stack on the 476 cpu. */
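/* The emitted thunk is trivial (illustrative; sectioning and visibility
   depend on USE_HIDDEN_LINKONCE):
     __ppc476.get_thunk:
             blr
   Pairing each bl to it with this blr return keeps the 476 link stack
   consistent.  */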
32716 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32717 static void
32718 rs6000_code_end (void)
32720 char name[32];
32721 tree decl;
32723 if (!TARGET_LINK_STACK)
32724 return;
32726 get_ppc476_thunk_name (name);
32728 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32729 build_function_type_list (void_type_node, NULL_TREE));
32730 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32731 NULL_TREE, void_type_node);
32732 TREE_PUBLIC (decl) = 1;
32733 TREE_STATIC (decl) = 1;
32735 #if RS6000_WEAK
32736 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
32738 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32739 targetm.asm_out.unique_section (decl, 0);
32740 switch_to_section (get_named_section (decl, NULL, 0));
32741 DECL_WEAK (decl) = 1;
32742 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32743 targetm.asm_out.globalize_label (asm_out_file, name);
32744 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32745 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32747 else
32748 #endif
32750 switch_to_section (text_section);
32751 ASM_OUTPUT_LABEL (asm_out_file, name);
32754 DECL_INITIAL (decl) = make_node (BLOCK);
32755 current_function_decl = decl;
32756 allocate_struct_function (decl, false);
32757 init_function_start (decl);
32758 first_function_block_is_cold = false;
32759 /* Make sure unwind info is emitted for the thunk if needed. */
32760 final_start_function (emit_barrier (), asm_out_file, 1);
32762 fputs ("\tblr\n", asm_out_file);
32764 final_end_function ();
32765 init_insn_lengths ();
32766 free_after_compilation (cfun);
32767 set_cfun (NULL);
32768 current_function_decl = NULL;
32771 /* Add r30 to hard reg set if the prologue sets it up and it is not
32772 pic_offset_table_rtx. */
32774 static void
32775 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32777 if (!TARGET_SINGLE_PIC_BASE
32778 && TARGET_TOC
32779 && TARGET_MINIMAL_TOC
32780 && !constant_pool_empty_p ())
32781 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32782 if (cfun->machine->split_stack_argp_used)
32783 add_to_hard_reg_set (&set->set, Pmode, 12);
32785 /* Make sure the hard reg set doesn't include r2, which was possibly added
32786 via PIC_OFFSET_TABLE_REGNUM. */
32787 if (TARGET_TOC)
32788 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
32792 /* Helper function for rs6000_split_logical to emit a logical instruction after
32793 splitting the operation into single GPR registers.
32795 DEST is the destination register.
32796 OP1 and OP2 are the input source registers.
32797 CODE is the base operation (AND, IOR, XOR, NOT).
32798 MODE is the machine mode.
32799 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32800 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32801 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32803 static void
32804 rs6000_split_logical_inner (rtx dest,
32805 rtx op1,
32806 rtx op2,
32807 enum rtx_code code,
32808 machine_mode mode,
32809 bool complement_final_p,
32810 bool complement_op1_p,
32811 bool complement_op2_p)
32813 rtx bool_rtx;
32815 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32816 if (op2 && CONST_INT_P (op2)
32817 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32818 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32820 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32821 HOST_WIDE_INT value = INTVAL (op2) & mask;
32823 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32824 if (code == AND)
32826 if (value == 0)
32828 emit_insn (gen_rtx_SET (dest, const0_rtx));
32829 return;
32832 else if (value == mask)
32834 if (!rtx_equal_p (dest, op1))
32835 emit_insn (gen_rtx_SET (dest, op1));
32836 return;
32840 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32841 into separate ORI/ORIS or XORI/XORIS instructions. */
32842 else if (code == IOR || code == XOR)
32844 if (value == 0)
32846 if (!rtx_equal_p (dest, op1))
32847 emit_insn (gen_rtx_SET (dest, op1));
32848 return;
32853 if (code == AND && mode == SImode
32854 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32856 emit_insn (gen_andsi3 (dest, op1, op2));
32857 return;
32860 if (complement_op1_p)
32861 op1 = gen_rtx_NOT (mode, op1);
32863 if (complement_op2_p)
32864 op2 = gen_rtx_NOT (mode, op2);
32866 /* For canonical RTL, if only one arm is inverted it is the first. */
32867 if (!complement_op1_p && complement_op2_p)
32868 std::swap (op1, op2);
32870 bool_rtx = ((code == NOT)
32871 ? gen_rtx_NOT (mode, op1)
32872 : gen_rtx_fmt_ee (code, mode, op1, op2));
32874 if (complement_final_p)
32875 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32877 emit_insn (gen_rtx_SET (dest, bool_rtx));
32880 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32881 operations are split immediately during RTL generation to allow for more
32882 optimizations of the AND/IOR/XOR.
32884 OPERANDS is an array containing the destination and two input operands.
32885 CODE is the base operation (AND, IOR, XOR, NOT).
32886 MODE is the machine mode.
32887 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32888 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32889 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32890 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32891 formation of the AND instructions. */
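/* Example (illustrative): on a 32-bit target,
     (set (reg:DI 4) (xor:DI (reg:DI 4) (const_int 0x12345678)))
   splits into SImode halves.  The high half is XOR with 0, which becomes
   a simple move (or nothing), while 0x12345678 is not a 16-bit logical
   constant, so the low half is further split into
     xoris 5,5,0x1234
     xori  5,5,0x5678  */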
32893 static void
32894 rs6000_split_logical_di (rtx operands[3],
32895 enum rtx_code code,
32896 bool complement_final_p,
32897 bool complement_op1_p,
32898 bool complement_op2_p)
32900 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32901 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32902 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32903 enum hi_lo { hi = 0, lo = 1 };
32904 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32905 size_t i;
32907 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32908 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32909 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32910 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32912 if (code == NOT)
32913 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32914 else
32916 if (!CONST_INT_P (operands[2]))
32918 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32919 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32921 else
32923 HOST_WIDE_INT value = INTVAL (operands[2]);
32924 HOST_WIDE_INT value_hi_lo[2];
32926 gcc_assert (!complement_final_p);
32927 gcc_assert (!complement_op1_p);
32928 gcc_assert (!complement_op2_p);
32930 value_hi_lo[hi] = value >> 32;
32931 value_hi_lo[lo] = value & lower_32bits;
32933 for (i = 0; i < 2; i++)
32935 HOST_WIDE_INT sub_value = value_hi_lo[i];
32937 if (sub_value & sign_bit)
32938 sub_value |= upper_32bits;
32940 op2_hi_lo[i] = GEN_INT (sub_value);
32942 /* If this is an AND instruction, check to see if we need to load
32943 the value in a register. */
32944 if (code == AND && sub_value != -1 && sub_value != 0
32945 && !and_operand (op2_hi_lo[i], SImode))
32946 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32951 for (i = 0; i < 2; i++)
32953 /* Split large IOR/XOR operations. */
32954 if ((code == IOR || code == XOR)
32955 && CONST_INT_P (op2_hi_lo[i])
32956 && !complement_final_p
32957 && !complement_op1_p
32958 && !complement_op2_p
32959 && !logical_const_operand (op2_hi_lo[i], SImode))
32961 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32962 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32963 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32964 rtx tmp = gen_reg_rtx (SImode);
32966 /* Make sure the constant is sign extended. */
32967 if ((hi_16bits & sign_bit) != 0)
32968 hi_16bits |= upper_32bits;
32970 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32971 code, SImode, false, false, false);
32973 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32974 code, SImode, false, false, false);
32976 else
32977 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32978 code, SImode, complement_final_p,
32979 complement_op1_p, complement_op2_p);
32982 return;
32985 /* Split the insns that make up boolean operations operating on multiple GPR
32986 registers. The boolean MD patterns ensure that the inputs either are
32987 exactly the same as the output registers, or there is no overlap.
32989 OPERANDS is an array containing the destination and two input operands.
32990 CODE is the base operation (AND, IOR, XOR, NOT).
32991 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32992 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32993 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32995 void
32996 rs6000_split_logical (rtx operands[3],
32997 enum rtx_code code,
32998 bool complement_final_p,
32999 bool complement_op1_p,
33000 bool complement_op2_p)
33002 machine_mode mode = GET_MODE (operands[0]);
33003 machine_mode sub_mode;
33004 rtx op0, op1, op2;
33005 int sub_size, regno0, regno1, nregs, i;
33007 /* If this is DImode, use the specialized version that can run before
33008 register allocation. */
33009 if (mode == DImode && !TARGET_POWERPC64)
33011 rs6000_split_logical_di (operands, code, complement_final_p,
33012 complement_op1_p, complement_op2_p);
33013 return;
33016 op0 = operands[0];
33017 op1 = operands[1];
33018 op2 = (code == NOT) ? NULL_RTX : operands[2];
33019 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33020 sub_size = GET_MODE_SIZE (sub_mode);
33021 regno0 = REGNO (op0);
33022 regno1 = REGNO (op1);
33024 gcc_assert (reload_completed);
33025 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33026 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33028 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33029 gcc_assert (nregs > 1);
33031 if (op2 && REG_P (op2))
33032 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33034 for (i = 0; i < nregs; i++)
33036 int offset = i * sub_size;
33037 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33038 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33039 rtx sub_op2 = ((code == NOT)
33040 ? NULL_RTX
33041 : simplify_subreg (sub_mode, op2, mode, offset));
33043 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33044 complement_final_p, complement_op1_p,
33045 complement_op2_p);
33048 return;
33052 /* Return true if the peephole2 can combine a load involving a combination of
33053 an addis instruction and a load with an offset that can be fused together on
33054 a power8. */
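/* Example of a fusible pair (illustrative):
     addis 9,2,.LC0@toc@ha     <- addis_reg r9, addis_value the high part
     lwz   3,.LC0@toc@l(9)     <- target r3, mem based on r9
   When r9 != r3, the checks below additionally require that r9 be dead
   after the load and that r3 not appear in the memory address.  */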
33056 bool
33057 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
33058 rtx addis_value, /* addis value. */
33059 rtx target, /* target register that is loaded. */
33060 rtx mem) /* bottom part of the memory addr. */
33062 rtx addr;
33063 rtx base_reg;
33065 /* Validate arguments. */
33066 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33067 return false;
33069 if (!base_reg_operand (target, GET_MODE (target)))
33070 return false;
33072 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33073 return false;
33075 /* Allow sign/zero extension. */
33076 if (GET_CODE (mem) == ZERO_EXTEND
33077 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33078 mem = XEXP (mem, 0);
33080 if (!MEM_P (mem))
33081 return false;
33083 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33084 return false;
33086 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33087 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33088 return false;
33090 /* Validate that the register used to load the high value is either the
33091 register being loaded, or we can safely replace its use.
33093 This function is only called from the peephole2 pass and we assume that
33094 there are 2 instructions in the peephole (addis and load), so we want to
33095 check if the target register was not used in the memory address and the
33096 register to hold the addis result is dead after the peephole. */
33097 if (REGNO (addis_reg) != REGNO (target))
33099 if (reg_mentioned_p (target, mem))
33100 return false;
33102 if (!peep2_reg_dead_p (2, addis_reg))
33103 return false;
33105 /* If the target register being loaded is the stack pointer, we must
33106 avoid loading any other value into it, even temporarily. */
33107 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33108 return false;
33111 base_reg = XEXP (addr, 0);
33112 return REGNO (addis_reg) == REGNO (base_reg);
33115 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33116 sequence. We adjust the addis register to use the target register. If the
33117 load sign-extends, we change the code to do a zero-extending load followed
33118 by an explicit sign extension, since the fusion only covers zero-extending
33119 loads.
33121 The operands are:
33122 operands[0] register set with addis (to be replaced with target)
33123 operands[1] value set via addis
33124 operands[2] target register being loaded
33125 operands[3] D-form memory reference using operands[0]. */
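/* Example (illustrative, assuming TARGET_P8_FUSION_SIGN): the pair
     addis 9,2,.LC0@toc@ha
     lha   3,.LC0@toc@l(9)
   is rewritten to load through the target register with a zero-extending
   load plus an explicit sign extension:
     addis 3,2,.LC0@toc@ha
     lhz   3,.LC0@toc@l(3)
     extsh 3,3  */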
33127 void
33128 expand_fusion_gpr_load (rtx *operands)
33130 rtx addis_value = operands[1];
33131 rtx target = operands[2];
33132 rtx orig_mem = operands[3];
33133 rtx new_addr, new_mem, orig_addr, offset;
33134 enum rtx_code plus_or_lo_sum;
33135 machine_mode target_mode = GET_MODE (target);
33136 machine_mode extend_mode = target_mode;
33137 machine_mode ptr_mode = Pmode;
33138 enum rtx_code extend = UNKNOWN;
33140 if (GET_CODE (orig_mem) == ZERO_EXTEND
33141 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33143 extend = GET_CODE (orig_mem);
33144 orig_mem = XEXP (orig_mem, 0);
33145 target_mode = GET_MODE (orig_mem);
33148 gcc_assert (MEM_P (orig_mem));
33150 orig_addr = XEXP (orig_mem, 0);
33151 plus_or_lo_sum = GET_CODE (orig_addr);
33152 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33154 offset = XEXP (orig_addr, 1);
33155 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
33156 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
33158 if (extend != UNKNOWN)
33159 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33161 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
33162 UNSPEC_FUSION_GPR);
33163 emit_insn (gen_rtx_SET (target, new_mem));
33165 if (extend == SIGN_EXTEND)
33167 int sub_off = ((BYTES_BIG_ENDIAN)
33168 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33169 : 0);
33170 rtx sign_reg
33171 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33173 emit_insn (gen_rtx_SET (target,
33174 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33177 return;

/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);	/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);	/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}
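
/* For reference, a sketch of the assembly chosen by the cases above:
     addis_value is an L constant:	lis target,<high half>
     PLUS of reg and L constant:	addis target,reg,<high half>
     HIGH of a TOC reference:		addis target,toc,sym@toc@ha (ELF)
					or addis target,sym@u(toc) (XCOFF)
     HIGH of other constant:		lis target,const@ha (32-bit ELF)
   The @toc@ha / @u / @ha relocations make the assembler supply the
   high-adjusted half of the address.  */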

/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

static void
emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
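
/* Usage sketch (hypothetical operands): a call such as
   emit_fusion_load (r9, r9, offset, "lwz") with OFFSET a TOC-relative
   unspec prints "lwz 9,sym@toc@l(9)" on ELF, the low-part partner of the
   @toc@ha addis emitted by emit_fusion_addis.  */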

/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}

/* Return a string to fuse an addis instruction with a GPR load into the same
   register that the addis instruction set.  The address that is used is the
   logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load (target, target, load_offset, load_str);

  return "";
}
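
/* End to end, a fused TOC-relative SImode load of SYM into r9 therefore
   comes out as (a sketch):

	addis 9,2,sym@toc@ha
	lwz 9,sym@toc@l(9)

   with both instructions targeting the same register, which is the form
   the processor can fuse.  */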

#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that clears the entire FPSCR image in the low word, including the
     exception status and enable bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
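
/* An informal note on the mask constants above: mffs returns the 32-bit
   FPSCR in the low word of a double, so all of the masks keep the upper
   word intact.  The low three bits of the FPSCR image are the NI bit and
   the two RN rounding-mode bits, which is why the hold mask ends in ...07
   (preserve rounding state) while the clear mask zeroes the entire low
   word before mtfsf 0xff writes it back.  */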

void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The layout of the vmrgew instruction's destination is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
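
/* Element flow for the code above (a sketch): with src1 = {a, b} and
   src2 = {c, d} in V2DFmode, the xxpermdi pairs form {a, c} and {b, d},
   the conversions place the narrowed values in the even word slots, and
   vmrgew interleaves them so that dst = {(float)a, (float)b, (float)c,
   (float)d} in vector element order.  */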

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The layout of the vmrgew instruction's destination is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
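
/* The element flow matches rs6000_generate_float2_double_code above, except
   that the inputs are V2DImode integers converted with xvcvsxdsp (signed)
   or xvcvuxdsp (unsigned).  */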

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
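
/* A sketch of the element flow (endianness handling aside): the xxpermdi
   insns split {src1, src2} into two doubleword pairs, xvcvdpsxws or
   xvcvdpuxws converts each pair to 32-bit integers in the even word slots,
   and vmrgew interleaves the two partial results into dst.  */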

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}

/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}

/* Create an alias for a mangled name where we have changed the mangling (in
   GCC 8.1, we used U10__float128, and now we use u9__ieee128).  This is called
   via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME.  */

#if TARGET_ELF && RS6000_WEAK
static void
rs6000_globalize_decl_name (FILE * stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  targetm.asm_out.globalize_label (stream, name);

  if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
    {
      tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
      const char *old_name;

      ieee128_mangling_gcc_8_1 = true;
      lang_hooks.set_decl_assembler_name (decl);
      old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
      ieee128_mangling_gcc_8_1 = false;

      if (strcmp (name, old_name) != 0)
	{
	  fprintf (stream, "\t.weak %s\n", old_name);
	  fprintf (stream, "\t.set %s,%s\n", old_name, name);
	}
    }
}
#endif
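
/* For example (hypothetical mangled names): for a C++ function
   f (__float128) whose current assembler name is _Z1fu9__ieee128, this
   emits

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128

   so that objects built against the GCC 8.1 mangling continue to link.  */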

/* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
   function names from <foo>l to <foo>f128 if the default long double type is
   IEEE 128-bit.  Typically, with the C and C++ languages, the standard math.h
   include file switches the names on systems that support long double as IEEE
   128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
   In the future, glibc will export names like __ieee128_sinf128 and we can
   switch to using those instead of using sinf128, which pollutes the user's
   namespace.

   This will switch the names for Fortran math functions as well (Fortran
   doesn't use math.h).  However, Fortran needs other changes to the compiler
   and library before you can switch the real*16 type at compile time.

   We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name.  We
   only do this if the default is that long double is IBM extended double, and
   the user asked for IEEE 128-bit.  */

static tree
rs6000_mangle_decl_assembler_name (tree decl, tree id)
{
  if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
    {
      size_t len = IDENTIFIER_LENGTH (id);
      const char *name = IDENTIFIER_POINTER (id);

      if (name[len - 1] == 'l')
	{
	  bool uses_ieee128_p = false;
	  tree type = TREE_TYPE (decl);
	  machine_mode ret_mode = TYPE_MODE (type);

	  /* See if the function returns an IEEE 128-bit floating point type or
	     complex type.  */
	  if (ret_mode == TFmode || ret_mode == TCmode)
	    uses_ieee128_p = true;
	  else
	    {
	      function_args_iterator args_iter;
	      tree arg;

	      /* See if the function passes an IEEE 128-bit floating point type
		 or complex type.  */
	      FOREACH_FUNCTION_ARGS (type, arg, args_iter)
		{
		  machine_mode arg_mode = TYPE_MODE (arg);
		  if (arg_mode == TFmode || arg_mode == TCmode)
		    {
		      uses_ieee128_p = true;
		      break;
		    }
		}
	    }

	  /* If we passed or returned an IEEE 128-bit floating point type,
	     change the name.  */
	  if (uses_ieee128_p)
	    {
	      char *name2 = (char *) alloca (len + 4);
	      memcpy (name2, name, len - 1);
	      strcpy (name2 + len - 1, "f128");
	      id = get_identifier (name2);
	    }
	}
    }

  return id;
}
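
/* For example: with long double defaulting to IBM extended double but
   -mabi=ieeelongdouble in effect, __builtin_sinl resolves to the identifier
   "sinl"; since sinl then returns the TFmode IEEE 128-bit type, the
   trailing "l" is replaced and the builtin is emitted as "sinf128".  */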

/* Predict whether the given loop in gimple will be transformed in the RTL
   doloop_optimize pass.  */

static bool
rs6000_predict_doloop_p (struct loop *loop)
{
  gcc_assert (loop);

  /* On rs6000, targetm.can_use_doloop_p is actually
     can_use_doloop_if_innermost.  Just ensure the loop is innermost.  */
  if (loop->inner != NULL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predict doloop failure due to"
			    " loop nesting.\n");
      return false;
    }

  return true;
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"